[PATCH 7/57][Arm][GAS] Add support for MVE instructions: vstr/vldr
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;		/* Symbol at the start of the function
					   being unwound -- presumably set by
					   the .fnstart handler; TODO confirm.  */
52 symbolS * table_entry;		/* Symbol of this function's unwind
					   table entry -- TODO confirm.  */
53 symbolS * personality_routine;	/* Personality routine symbol, if
					   any -- TODO confirm.  */
54 int personality_index;		/* Index of a predefined personality
					   routine -- TODO confirm.  */
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;		/* Buffer of unwind opcode bytes.  */
60 int opcode_count;			/* Bytes of OPCODES in use.  */
61 int opcode_alloc;			/* Allocated size of OPCODES.  */
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions.  */
84
85 typedef enum
86 {
87 PARSE_OPERAND_SUCCESS,		/* Operand parsed successfully.  */
88 PARSE_OPERAND_FAIL,			/* Parse failed; per the next value's
					   name, callers may still backtrack
					   and try an alternative form --
					   TODO confirm at call sites.  */
89 PARSE_OPERAND_FAIL_NO_BACKTRACK	/* Parse failed and no alternative
					   should be attempted.  */
90 } parse_operand_result;
91
/* Floating-point ABI variants (stored in mfloat_abi_opt below; presumably
   selected with -mfloat-abi, following the standard GCC/GAS semantics --
   TODO confirm).  */
92 enum arm_float_abi
93 {
94 ARM_FLOAT_ABI_HARD,		/* FP arguments passed in FP registers.  */
95 ARM_FLOAT_ABI_SOFTFP,		/* FP instructions used, but soft-float
				   calling convention.  */
96 ARM_FLOAT_ABI_SOFT		/* No FP instructions; FP done in software.  */
97 };
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 static const arm_feature_set mve_ext =
306 ARM_FEATURE_COPROC (FPU_MVE);
307 static const arm_feature_set mve_fp_ext =
308 ARM_FEATURE_COPROC (FPU_MVE_FP);
309 #ifdef OBJ_ELF
310 static const arm_feature_set fpu_vfp_fp16 =
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
312 static const arm_feature_set fpu_neon_ext_fma =
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
314 #endif
315 static const arm_feature_set fpu_vfp_ext_fma =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
317 static const arm_feature_set fpu_vfp_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
319 static const arm_feature_set fpu_vfp_ext_armv8xd =
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
321 static const arm_feature_set fpu_neon_ext_armv8 =
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
323 static const arm_feature_set fpu_crypto_ext_armv8 =
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
325 static const arm_feature_set crc_ext_armv8 =
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
327 static const arm_feature_set fpu_neon_ext_v8_1 =
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
329 static const arm_feature_set fpu_neon_ext_dotprod =
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
331
332 static int mfloat_abi_opt = -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
334 directive. */
335 static arm_feature_set selected_arch = ARM_ARCH_NONE;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
337 directive. */
338 static arm_feature_set selected_ext = ARM_ARCH_NONE;
339 /* Feature bits selected by the last -mcpu/-march, or by the combination of
340 the last .cpu/.arch directive and any .arch_extension directives seen
341 since that directive. */
342 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu = FPU_NONE;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name[20];
349
350 extern FLONUM_TYPE generic_floating_point_number;
351
352 /* Return if no cpu was selected on command-line. */
353 static bfd_boolean
354 no_cpu_selected (void)
355 {
356 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
357 }
358
359 #ifdef OBJ_ELF
360 # ifdef EABI_DEFAULT
361 static int meabi_flags = EABI_DEFAULT;
362 # else
363 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
364 # endif
365
366 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
367
368 bfd_boolean
369 arm_is_eabi (void)
370 {
371 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
372 }
373 #endif
374
375 #ifdef OBJ_ELF
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS * GOT_symbol;
378 #endif
379
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
383 instructions. */
384 static int thumb_mode = 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
389
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
392 {
393 IMPLICIT_IT_MODE_NEVER = 0x00,
394 IMPLICIT_IT_MODE_ARM = 0x01,
395 IMPLICIT_IT_MODE_THUMB = 0x02,
396 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
397 };
398 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
399
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
402
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
407 there.)
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
410 machine code.
411
412 Important differences from the old Thumb mode:
413
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
422
423 static bfd_boolean unified_syntax = FALSE;
424
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars[] = "#[]{}";
430
431 enum neon_el_type
432 {
433 NT_invtype,
434 NT_untyped,
435 NT_integer,
436 NT_float,
437 NT_poly,
438 NT_signed,
439 NT_unsigned
440 };
441
442 struct neon_type_el
443 {
444 enum neon_el_type type;
445 unsigned size;
446 };
447
448 #define NEON_MAX_TYPE_ELS 4
449
450 struct neon_type
451 {
452 struct neon_type_el el[NEON_MAX_TYPE_ELS];
453 unsigned elems;
454 };
455
456 enum pred_instruction_type
457 {
458 OUTSIDE_PRED_INSN,
459 INSIDE_VPT_INSN,
460 INSIDE_IT_INSN,
461 INSIDE_IT_LAST_INSN,
462 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN, /* The IT insn has been parsed. */
467 VPT_INSN, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
469 a predication code. */
470 MVE_UNPREDICABLE_INSN /* MVE instruction that is non-predicable. */
471 };
472
473 /* The maximum number of operands we need. */
474 #define ARM_IT_MAX_OPERANDS 6
475 #define ARM_IT_MAX_RELOCS 3
476
477 struct arm_it
478 {
479 const char * error;
480 unsigned long instruction;
481 int size;
482 int size_req;
483 int cond;
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
486 appropriate. */
487 int uncond_value;
488 struct neon_type vectype;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
491 int is_neon;
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
494 unsigned long relax;
495 struct
496 {
497 bfd_reloc_code_real_type type;
498 expressionS exp;
499 int pc_rel;
500 } relocs[ARM_IT_MAX_RELOCS];
501
502 enum pred_instruction_type pred_insn_type;
503
504 struct
505 {
506 unsigned reg;
507 signed int imm;
508 struct neon_type_el vectype;
509 unsigned present : 1; /* Operand present. */
510 unsigned isreg : 1; /* Operand was a register. */
511 unsigned immisreg : 2; /* .imm field is a second register.
512 0: imm, 1: gpr, 2: MVE Q-register. */
513 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
514 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
515 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
516 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
517 instructions. This allows us to disambiguate ARM <-> vector insns. */
518 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
519 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
520 unsigned isquad : 1; /* Operand is SIMD quad register. */
521 unsigned issingle : 1; /* Operand is VFP single-precision register. */
522 unsigned hasreloc : 1; /* Operand has relocation suffix. */
523 unsigned writeback : 1; /* Operand has trailing ! */
524 unsigned preind : 1; /* Preindexed address. */
525 unsigned postind : 1; /* Postindexed address. */
526 unsigned negative : 1; /* Index register was negated. */
527 unsigned shifted : 1; /* Shift applied to operation. */
528 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
529 } operands[ARM_IT_MAX_OPERANDS];
530 };
531
532 static struct arm_it inst;
533
534 #define NUM_FLOAT_VALS 8
535
536 const char * fp_const[] =
537 {
538 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
539 };
540
541 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
542
543 #define FAIL (-1)
544 #define SUCCESS (0)
545
546 #define SUFF_S 1
547 #define SUFF_D 2
548 #define SUFF_E 3
549 #define SUFF_P 4
550
551 #define CP_T_X 0x00008000
552 #define CP_T_Y 0x00400000
553
554 #define CONDS_BIT 0x00100000
555 #define LOAD_BIT 0x00100000
556
557 #define DOUBLE_LOAD_FLAG 0x00000001
558
/* Entry in a condition-code hash table: the suffix as written in assembly
   and its numeric encoding.  */
559 struct asm_cond
560 {
561 const char * template_name;	/* Condition name, e.g. "eq" -- TODO
				   confirm against the table that fills
				   arm_cond_hsh.  */
562 unsigned long value;		/* Numeric encoding of the condition.  */
563 };
564
/* Encoding of the AL ("always") condition.  */
565 #define COND_ALWAYS 0xE
566
/* Entry describing a PSR name and the flag bits it selects (see the
   PSR_* bit defines below).  */
567 struct asm_psr
568 {
569 const char * template_name;
570 unsigned long field;
571 };
572
/* Entry describing a barrier option: its name, its encoding, and the
   architecture feature set that provides it.  */
573 struct asm_barrier_opt
574 {
575 const char * template_name;
576 unsigned long value;
577 const arm_feature_set arch;
578 };
579
580 /* The bit that distinguishes CPSR and SPSR. */
581 #define SPSR_BIT (1 << 22)
582
583 /* The individual PSR flag bits. */
584 #define PSR_c (1 << 16)
585 #define PSR_x (1 << 17)
586 #define PSR_s (1 << 18)
587 #define PSR_f (1 << 19)
588
589 struct reloc_entry
590 {
591 const char * name;
592 bfd_reloc_code_real_type reloc;
593 };
594
595 enum vfp_reg_pos
596 {
597 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
598 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
599 };
600
601 enum vfp_ldstm_type
602 {
603 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
604 };
605
606 /* Bits for DEFINED field in neon_typed_alias. */
607 #define NTA_HASTYPE 1
608 #define NTA_HASINDEX 2
609
610 struct neon_typed_alias
611 {
612 unsigned char defined;
613 unsigned char index;
614 struct neon_type_el eltype;
615 };
616
617 /* ARM register categories. This includes coprocessor numbers and various
618 architecture extensions' registers. Each entry should have an error message
619 in reg_expected_msgs below. */
620 enum arm_reg_type
621 {
622 REG_TYPE_RN,
623 REG_TYPE_CP,
624 REG_TYPE_CN,
625 REG_TYPE_FN,
626 REG_TYPE_VFS,
627 REG_TYPE_VFD,
628 REG_TYPE_NQ,
629 REG_TYPE_VFSD,
630 REG_TYPE_NDQ,
631 REG_TYPE_NSD,
632 REG_TYPE_NSDQ,
633 REG_TYPE_VFC,
634 REG_TYPE_MVF,
635 REG_TYPE_MVD,
636 REG_TYPE_MVFX,
637 REG_TYPE_MVDX,
638 REG_TYPE_MVAX,
639 REG_TYPE_MQ,
640 REG_TYPE_DSPSC,
641 REG_TYPE_MMXWR,
642 REG_TYPE_MMXWC,
643 REG_TYPE_MMXWCG,
644 REG_TYPE_XSCALE,
645 REG_TYPE_RNB,
646 };
647
648 /* Structure for a hash table entry for a register.
649 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
650 information which states whether a vector type or index is specified (for a
651 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
652 struct reg_entry
653 {
654 const char * name;		/* Register name as written in assembly.  */
655 unsigned int number;		/* Register number / encoding value.  */
656 unsigned char type;		/* An enum arm_reg_type value.  */
657 unsigned char builtin;	/* Presumably nonzero for predefined
				   registers as opposed to user-created
				   aliases -- TODO confirm.  */
658 struct neon_typed_alias * neon;	/* Extra type/index info for .dn/.qn
					   aliases (see comment above), or
					   NULL.  */
659 };
660
661 /* Diagnostics used when we don't get a register of the expected type. */
662 const char * const reg_expected_msgs[] =
663 {
664 [REG_TYPE_RN] = N_("ARM register expected"),
665 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
666 [REG_TYPE_CN] = N_("co-processor register expected"),
667 [REG_TYPE_FN] = N_("FPA register expected"),
668 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
669 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
670 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
671 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
672 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
673 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
674 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
675 " expected"),
676 [REG_TYPE_VFC] = N_("VFP system register expected"),
677 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
678 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
679 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
680 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
681 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
682 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
683 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
684 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
685 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
686 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
687 [REG_TYPE_MQ] = N_("MVE vector register expected"),
688 [REG_TYPE_RNB] = N_("")
689 };
690
691 /* Some well known registers that we refer to directly elsewhere. */
692 #define REG_R12 12
693 #define REG_SP 13
694 #define REG_LR 14
695 #define REG_PC 15
696
697 /* ARM instructions take 4 bytes in the object file, Thumb instructions
698 take 2. */
699 #define INSN_SIZE 4
700
701 struct asm_opcode
702 {
703 /* Basic string to match. */
704 const char * template_name;
705
706 /* Parameters to instruction. */
707 unsigned int operands[8];
708
709 /* Conditional tag - see opcode_lookup. */
710 unsigned int tag : 4;
711
712 /* Basic instruction code. */
713 unsigned int avalue;
714
715 /* Thumb-format instruction code. */
716 unsigned int tvalue;
717
718 /* Which architecture variant provides this instruction. */
719 const arm_feature_set * avariant;
720 const arm_feature_set * tvariant;
721
722 /* Function to call to encode instruction in ARM format. */
723 void (* aencode) (void);
724
725 /* Function to call to encode instruction in Thumb format. */
726 void (* tencode) (void);
727
728 /* Indicates whether this instruction may be vector predicated. */
729 unsigned int mayBeVecPred : 1;
730 };
731
732 /* Defines for various bits that we will want to toggle. */
733 #define INST_IMMEDIATE 0x02000000
734 #define OFFSET_REG 0x02000000
735 #define HWOFFSET_IMM 0x00400000
736 #define SHIFT_BY_REG 0x00000010
737 #define PRE_INDEX 0x01000000
738 #define INDEX_UP 0x00800000
739 #define WRITE_BACK 0x00200000
740 #define LDM_TYPE_2_OR_3 0x00400000
741 #define CPSI_MMOD 0x00020000
742
743 #define LITERAL_MASK 0xf000f000
744 #define OPCODE_MASK 0xfe1fffff
745 #define V4_STR_BIT 0x00000020
746 #define VLDR_VMOV_SAME 0x0040f000
747
748 #define T2_SUBS_PC_LR 0xf3de8f00
749
750 #define DATA_OP_SHIFT 21
751 #define SBIT_SHIFT 20
752
753 #define T2_OPCODE_MASK 0xfe1fffff
754 #define T2_DATA_OP_SHIFT 21
755 #define T2_SBIT_SHIFT 20
756
757 #define A_COND_MASK 0xf0000000
758 #define A_PUSH_POP_OP_MASK 0x0fff0000
759
760 /* Opcodes for pushing/popping registers to/from the stack. */
761 #define A1_OPCODE_PUSH 0x092d0000
762 #define A2_OPCODE_PUSH 0x052d0004
763 #define A2_OPCODE_POP 0x049d0004
764
765 /* Codes to distinguish the arithmetic instructions. */
766 #define OPCODE_AND 0
767 #define OPCODE_EOR 1
768 #define OPCODE_SUB 2
769 #define OPCODE_RSB 3
770 #define OPCODE_ADD 4
771 #define OPCODE_ADC 5
772 #define OPCODE_SBC 6
773 #define OPCODE_RSC 7
774 #define OPCODE_TST 8
775 #define OPCODE_TEQ 9
776 #define OPCODE_CMP 10
777 #define OPCODE_CMN 11
778 #define OPCODE_ORR 12
779 #define OPCODE_MOV 13
780 #define OPCODE_BIC 14
781 #define OPCODE_MVN 15
782
783 #define T2_OPCODE_AND 0
784 #define T2_OPCODE_BIC 1
785 #define T2_OPCODE_ORR 2
786 #define T2_OPCODE_ORN 3
787 #define T2_OPCODE_EOR 4
788 #define T2_OPCODE_ADD 8
789 #define T2_OPCODE_ADC 10
790 #define T2_OPCODE_SBC 11
791 #define T2_OPCODE_SUB 13
792 #define T2_OPCODE_RSB 14
793
794 #define T_OPCODE_MUL 0x4340
795 #define T_OPCODE_TST 0x4200
796 #define T_OPCODE_CMN 0x42c0
797 #define T_OPCODE_NEG 0x4240
798 #define T_OPCODE_MVN 0x43c0
799
800 #define T_OPCODE_ADD_R3 0x1800
801 #define T_OPCODE_SUB_R3 0x1a00
802 #define T_OPCODE_ADD_HI 0x4400
803 #define T_OPCODE_ADD_ST 0xb000
804 #define T_OPCODE_SUB_ST 0xb080
805 #define T_OPCODE_ADD_SP 0xa800
806 #define T_OPCODE_ADD_PC 0xa000
807 #define T_OPCODE_ADD_I8 0x3000
808 #define T_OPCODE_SUB_I8 0x3800
809 #define T_OPCODE_ADD_I3 0x1c00
810 #define T_OPCODE_SUB_I3 0x1e00
811
812 #define T_OPCODE_ASR_R 0x4100
813 #define T_OPCODE_LSL_R 0x4080
814 #define T_OPCODE_LSR_R 0x40c0
815 #define T_OPCODE_ROR_R 0x41c0
816 #define T_OPCODE_ASR_I 0x1000
817 #define T_OPCODE_LSL_I 0x0000
818 #define T_OPCODE_LSR_I 0x0800
819
820 #define T_OPCODE_MOV_I8 0x2000
821 #define T_OPCODE_CMP_I8 0x2800
822 #define T_OPCODE_CMP_LR 0x4280
823 #define T_OPCODE_MOV_HR 0x4600
824 #define T_OPCODE_CMP_HR 0x4500
825
826 #define T_OPCODE_LDR_PC 0x4800
827 #define T_OPCODE_LDR_SP 0x9800
828 #define T_OPCODE_STR_SP 0x9000
829 #define T_OPCODE_LDR_IW 0x6800
830 #define T_OPCODE_STR_IW 0x6000
831 #define T_OPCODE_LDR_IH 0x8800
832 #define T_OPCODE_STR_IH 0x8000
833 #define T_OPCODE_LDR_IB 0x7800
834 #define T_OPCODE_STR_IB 0x7000
835 #define T_OPCODE_LDR_RW 0x5800
836 #define T_OPCODE_STR_RW 0x5000
837 #define T_OPCODE_LDR_RH 0x5a00
838 #define T_OPCODE_STR_RH 0x5200
839 #define T_OPCODE_LDR_RB 0x5c00
840 #define T_OPCODE_STR_RB 0x5400
841
842 #define T_OPCODE_PUSH 0xb400
843 #define T_OPCODE_POP 0xbc00
844
845 #define T_OPCODE_BRANCH 0xe000
846
847 #define THUMB_SIZE 2 /* Size of thumb instruction. */
848 #define THUMB_PP_PC_LR 0x0100
849 #define THUMB_LOAD_BIT 0x0800
850 #define THUMB2_LOAD_BIT 0x00100000
851
852 #define BAD_SYNTAX _("syntax error")
853 #define BAD_ARGS _("bad arguments to instruction")
854 #define BAD_SP _("r13 not allowed here")
855 #define BAD_PC _("r15 not allowed here")
856 #define BAD_ODD _("Odd register not allowed here")
857 #define BAD_EVEN _("Even register not allowed here")
858 #define BAD_COND _("instruction cannot be conditional")
859 #define BAD_OVERLAP _("registers may not be the same")
860 #define BAD_HIREG _("lo register required")
861 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
862 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
863 #define BAD_BRANCH _("branch must be last instruction in IT block")
864 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
865 #define BAD_NOT_IT _("instruction not allowed in IT block")
866 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
867 #define BAD_FPU _("selected FPU does not support instruction")
868 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
869 #define BAD_OUT_VPT \
870 _("vector predicated instruction should be in VPT/VPST block")
871 #define BAD_IT_COND _("incorrect condition in IT block")
872 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
873 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
874 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
875 #define BAD_PC_ADDRESSING \
876 _("cannot use register index with PC-relative addressing")
877 #define BAD_PC_WRITEBACK \
878 _("cannot use writeback with PC-relative addressing")
879 #define BAD_RANGE _("branch out of range")
880 #define BAD_FP16 _("selected processor does not support fp16 instruction")
881 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
882 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
883 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
884 "block")
885 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
886 "block")
887 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
888 " operand")
889 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
890 " operand")
891 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
892 #define BAD_MVE_AUTO \
893 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
894 " use a valid -march or -mcpu option.")
895 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
896 "and source operands makes instruction UNPREDICTABLE")
897 #define BAD_EL_TYPE _("bad element type for instruction")
898
899 static struct hash_control * arm_ops_hsh;
900 static struct hash_control * arm_cond_hsh;
901 static struct hash_control * arm_vcond_hsh;
902 static struct hash_control * arm_shift_hsh;
903 static struct hash_control * arm_psr_hsh;
904 static struct hash_control * arm_v7m_psr_hsh;
905 static struct hash_control * arm_reg_hsh;
906 static struct hash_control * arm_reloc_hsh;
907 static struct hash_control * arm_barrier_opt_hsh;
908
909 /* Stuff needed to resolve the label ambiguity
910 As:
911 ...
912 label: <insn>
913 may differ from:
914 ...
915 label:
916 <insn> */
917
918 symbolS * last_label_seen;
919 static int label_is_thumb_function_name = FALSE;
920
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS literals [MAX_LITERAL_POOL_SIZE]; /* Pending pool entries.  */
  unsigned int next_free_entry;	/* Count of slots used in LITERALS.  */
  unsigned int id;		/* Pool identifier.  */
  symbolS * symbol;		/* Symbol marking the pool's location.  */
  segT section;			/* Section this pool belongs to.  */
  subsegT sub_section;		/* Sub-section this pool belongs to.  */
#ifdef OBJ_ELF
  /* Source locations of the entries, parallel to LITERALS, for debug
     line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool * next;	/* Next pool in list_of_pools.  */
  unsigned int alignment;	/* Required pool alignment (log2 —
				   TODO confirm units against users).  */
} literal_pool;
939
940 /* Pointer to a linked list of literal pools. */
941 literal_pool * list_of_pools = NULL;
942
/* State machine for the .asmfunc/.endasmfunc directives (tracked in
   asmfunc_state below).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	 /* Not within an asmfunc region.  */
  WAITING_ASMFUNC_NAME,	 /* Directive seen; awaiting the function name.  */
  WAITING_ENDASMFUNC	 /* Inside the region; awaiting .endasmfunc.  */
} asmfunc_states;
949
950 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
951
952 #ifdef OBJ_ELF
953 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
954 #else
955 static struct current_pred now_pred;
956 #endif
957
/* Return non-zero if condition code COND is compatible with the
   condition of the current predication (IT/VPT) block.  The low bit of
   an ARM condition code selects between a condition and its inverse,
   so it is masked off on both sides of the comparison.  */
static inline int
now_pred_compatible (int cond)
{
  return (cond & ~1) == (now_pred.cc & ~1);
}
963
/* Return non-zero if the instruction being assembled carries a
   condition other than AL (always execute).  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
969
970 static int in_pred_block (void);
971
972 static int handle_pred_state (void);
973
974 static void force_automatic_it_block_close (void);
975
976 static void it_fsm_post_encode (void);
977
978 #define set_pred_insn_type(type) \
979 do \
980 { \
981 inst.pred_insn_type = type; \
982 if (handle_pred_state () == FAIL) \
983 return; \
984 } \
985 while (0)
986
987 #define set_pred_insn_type_nonvoid(type, failret) \
988 do \
989 { \
990 inst.pred_insn_type = type; \
991 if (handle_pred_state () == FAIL) \
992 return failret; \
993 } \
994 while(0)
995
996 #define set_pred_insn_type_last() \
997 do \
998 { \
999 if (inst.cond == COND_ALWAYS) \
1000 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1001 else \
1002 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1003 } \
1004 while (0)
1005
1006 /* Pure syntax. */
1007
1008 /* This array holds the chars that always start a comment. If the
1009 pre-processor is disabled, these aren't very useful. */
1010 char arm_comment_chars[] = "@";
1011
1012 /* This array holds the chars that only start a comment at the beginning of
1013 a line. If the line seems to have the form '# 123 filename'
1014 .line and .file directives will appear in the pre-processed output. */
1015 /* Note that input_file.c hand checks for '#' at the beginning of the
1016 first line of the input file. This is because the compiler outputs
1017 #NO_APP at the beginning of its output. */
1018 /* Also note that comments like this one will always work. */
1019 const char line_comment_chars[] = "#";
1020
1021 char arm_line_separator_chars[] = ";";
1022
1023 /* Chars that can be used to separate mant
1024 from exp in floating point numbers. */
1025 const char EXP_CHARS[] = "eE";
1026
1027 /* Chars that mean this number is a floating point constant. */
1028 /* As in 0f12.456 */
1029 /* or 0d1.2345e12 */
1030
1031 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
1032
1033 /* Prefix characters that indicate the start of an immediate
1034 value. */
1035 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1036
1037 /* Separator character handling. */
1038
1039 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1040
1041 static inline int
1042 skip_past_char (char ** str, char c)
1043 {
1044 /* PR gas/14987: Allow for whitespace before the expected character. */
1045 skip_whitespace (*str);
1046
1047 if (**str == c)
1048 {
1049 (*str)++;
1050 return SUCCESS;
1051 }
1052 else
1053 return FAIL;
1054 }
1055
1056 #define skip_past_comma(str) skip_past_char (str, ',')
1057
1058 /* Arithmetic expressions (possibly involving symbols). */
1059
1060 /* Return TRUE if anything in the expression is a bignum. */
1061
1062 static bfd_boolean
1063 walk_no_bignums (symbolS * sp)
1064 {
1065 if (symbol_get_value_expression (sp)->X_op == O_big)
1066 return TRUE;
1067
1068 if (symbol_get_value_expression (sp)->X_add_symbol)
1069 {
1070 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1071 || (symbol_get_value_expression (sp)->X_op_symbol
1072 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1073 }
1074
1075 return FALSE;
1076 }
1077
1078 static bfd_boolean in_my_get_expression = FALSE;
1079
1080 /* Third argument to my_get_expression. */
1081 #define GE_NO_PREFIX 0
1082 #define GE_IMM_PREFIX 1
1083 #define GE_OPT_PREFIX 2
1084 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1085 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1086 #define GE_OPT_PREFIX_BIG 3
1087
/* Parse an expression starting at *STR into EP.  PREFIX_MODE is one of
   the GE_* values above and controls whether a '#'/'$' immediate
   prefix is required, optional, or absent, and whether bignums are
   accepted.  On success, advances *STR past the expression and
   returns SUCCESS (0).  On failure, sets inst.error (if not already
   set) and returns non-zero.

   NOTE(review): the failure paths return the literal 1 rather than
   FAIL (-1); callers appear to test the result for non-zero only.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR by temporarily
     redirecting input_line_pointer.  The in_my_get_expression flag
     tells md_operand to mark unparsable operands O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1157
1158 /* Turn a string in input_line_pointer into a floating point constant
1159 of type TYPE, and store the appropriate bytes in *LITP. The number
1160 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1161 returned, or NULL on OK.
1162
1163 Note that fp constants aren't represent in the normal way on the ARM.
1164 In big endian mode, things are as expected. However, in little endian
1165 mode fp constants are big-endian word-wise, and little-endian byte-wise
1166 within the words. For example, (double) 1.1 in big endian mode is
1167 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1168 the byte sequence 99 99 f1 3f 9a 99 99 99.
1169
1170 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1171
1172 const char *
1173 md_atof (int type, char * litP, int * sizeP)
1174 {
1175 int prec;
1176 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1177 char *t;
1178 int i;
1179
1180 switch (type)
1181 {
1182 case 'f':
1183 case 'F':
1184 case 's':
1185 case 'S':
1186 prec = 2;
1187 break;
1188
1189 case 'd':
1190 case 'D':
1191 case 'r':
1192 case 'R':
1193 prec = 4;
1194 break;
1195
1196 case 'x':
1197 case 'X':
1198 prec = 5;
1199 break;
1200
1201 case 'p':
1202 case 'P':
1203 prec = 5;
1204 break;
1205
1206 default:
1207 *sizeP = 0;
1208 return _("Unrecognized or unsupported floating point constant");
1209 }
1210
1211 t = atof_ieee (input_line_pointer, type, words);
1212 if (t)
1213 input_line_pointer = t;
1214 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1215
1216 if (target_big_endian)
1217 {
1218 for (i = 0; i < prec; i++)
1219 {
1220 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1221 litP += sizeof (LITTLENUM_TYPE);
1222 }
1223 }
1224 else
1225 {
1226 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1227 for (i = prec - 1; i >= 0; i--)
1228 {
1229 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1230 litP += sizeof (LITTLENUM_TYPE);
1231 }
1232 else
1233 /* For a 4 byte float the order of elements in `words' is 1 0.
1234 For an 8 byte float the order is 1 0 3 2. */
1235 for (i = 0; i < prec; i += 2)
1236 {
1237 md_number_to_chars (litP, (valueT) words[i + 1],
1238 sizeof (LITTLENUM_TYPE));
1239 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1240 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1241 litP += 2 * sizeof (LITTLENUM_TYPE);
1242 }
1243 }
1244
1245 return NULL;
1246 }
1247
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */

void
md_operand (expressionS * exp)
{
  /* Only flag the operand as illegal when we were called back from
     expression () inside my_get_expression, which sets this flag
     around the call and reports the error afterwards.  */
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}
1257
1258 /* Immediate values. */
1259
1260 #ifdef OBJ_ELF
1261 /* Generic immediate-value read function for use in directives.
1262 Accepts anything that 'expression' can fold to a constant.
1263 *val receives the number. */
1264
1265 static int
1266 immediate_for_directive (int *val)
1267 {
1268 expressionS exp;
1269 exp.X_op = O_illegal;
1270
1271 if (is_immediate_prefix (*input_line_pointer))
1272 {
1273 input_line_pointer++;
1274 expression (&exp);
1275 }
1276
1277 if (exp.X_op != O_constant)
1278 {
1279 as_bad (_("expected #constant"));
1280 ignore_rest_of_line ();
1281 return FAIL;
1282 }
1283 *val = exp.X_add_number;
1284 return SUCCESS;
1285 }
1286 #endif
1287
1288 /* Register parsing. */
1289
1290 /* Generic register parser. CCP points to what should be the
1291 beginning of a register name. If it is indeed a valid register
1292 name, advance CCP over it and return the reg_entry structure;
1293 otherwise return NULL. Does not issue diagnostics. */
1294
1295 static struct reg_entry *
1296 arm_reg_parse_multi (char **ccp)
1297 {
1298 char *start = *ccp;
1299 char *p;
1300 struct reg_entry *reg;
1301
1302 skip_whitespace (start);
1303
1304 #ifdef REGISTER_PREFIX
1305 if (*start != REGISTER_PREFIX)
1306 return NULL;
1307 start++;
1308 #endif
1309 #ifdef OPTIONAL_REGISTER_PREFIX
1310 if (*start == OPTIONAL_REGISTER_PREFIX)
1311 start++;
1312 #endif
1313
1314 p = start;
1315 if (!ISALPHA (*p) || !is_name_beginner (*p))
1316 return NULL;
1317
1318 do
1319 p++;
1320 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1321
1322 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1323
1324 if (!reg)
1325 return NULL;
1326
1327 *ccp = p;
1328 return reg;
1329 }
1330
/* Helper for register parsing: accept the alternative syntaxes that a
   few register classes allow when the name at START did not match a
   register of the requested TYPE.  REG is the entry found by the
   generic parser at START (may be NULL).  On a match, returns the
   register number (advancing *CCP for the bare-number coprocessor
   case); otherwise returns FAIL.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1369
1370 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1371 return value is the register number or FAIL. */
1372
1373 static int
1374 arm_reg_parse (char **ccp, enum arm_reg_type type)
1375 {
1376 char *start = *ccp;
1377 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1378 int ret;
1379
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1382 return FAIL;
1383
1384 if (reg && reg->type == type)
1385 return reg->number;
1386
1387 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1388 return ret;
1389
1390 *ccp = start;
1391 return FAIL;
1392 }
1393
1394 /* Parse a Neon type specifier. *STR should point at the leading '.'
1395 character. Does no verification at this stage that the type fits the opcode
1396 properly. E.g.,
1397
1398 .i32.i32.s16
1399 .s32.f32
1400 .u16
1401
1402 Can all be legally parsed by this function.
1403
1404 Fills in neon_type struct pointer with parsed information, and updates STR
1405 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1406 type, FAIL if not. */
1407
1408 static int
1409 parse_neon_type (struct neon_type *type, char **str)
1410 {
1411 char *ptr = *str;
1412
1413 if (type)
1414 type->elems = 0;
1415
1416 while (type->elems < NEON_MAX_TYPE_ELS)
1417 {
1418 enum neon_el_type thistype = NT_untyped;
1419 unsigned thissize = -1u;
1420
1421 if (*ptr != '.')
1422 break;
1423
1424 ptr++;
1425
1426 /* Just a size without an explicit type. */
1427 if (ISDIGIT (*ptr))
1428 goto parsesize;
1429
1430 switch (TOLOWER (*ptr))
1431 {
1432 case 'i': thistype = NT_integer; break;
1433 case 'f': thistype = NT_float; break;
1434 case 'p': thistype = NT_poly; break;
1435 case 's': thistype = NT_signed; break;
1436 case 'u': thistype = NT_unsigned; break;
1437 case 'd':
1438 thistype = NT_float;
1439 thissize = 64;
1440 ptr++;
1441 goto done;
1442 default:
1443 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1444 return FAIL;
1445 }
1446
1447 ptr++;
1448
1449 /* .f is an abbreviation for .f32. */
1450 if (thistype == NT_float && !ISDIGIT (*ptr))
1451 thissize = 32;
1452 else
1453 {
1454 parsesize:
1455 thissize = strtoul (ptr, &ptr, 10);
1456
1457 if (thissize != 8 && thissize != 16 && thissize != 32
1458 && thissize != 64)
1459 {
1460 as_bad (_("bad size %d in type specifier"), thissize);
1461 return FAIL;
1462 }
1463 }
1464
1465 done:
1466 if (type)
1467 {
1468 type->el[type->elems].type = thistype;
1469 type->el[type->elems].size = thissize;
1470 type->elems++;
1471 }
1472 }
1473
1474 /* Empty/missing type is not a successful parse. */
1475 if (type->elems == 0)
1476 return FAIL;
1477
1478 *str = ptr;
1479
1480 return SUCCESS;
1481 }
1482
1483 /* Errors may be set multiple times during parsing or bit encoding
1484 (particularly in the Neon bits), but usually the earliest error which is set
1485 will be the most meaningful. Avoid overwriting it with later (cascading)
1486 errors by calling this function. */
1487
1488 static void
1489 first_error (const char *err)
1490 {
1491 if (!inst.error)
1492 inst.error = err;
1493 }
1494
1495 /* Parse a single type, e.g. ".s32", leading period included. */
1496 static int
1497 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1498 {
1499 char *str = *ccp;
1500 struct neon_type optype;
1501
1502 if (*str == '.')
1503 {
1504 if (parse_neon_type (&optype, &str) == SUCCESS)
1505 {
1506 if (optype.elems == 1)
1507 *vectype = optype.el[0];
1508 else
1509 {
1510 first_error (_("only one type should be specified for operand"));
1511 return FAIL;
1512 }
1513 }
1514 else
1515 {
1516 first_error (_("vector type expected"));
1517 return FAIL;
1518 }
1519 }
1520 else
1521 return FAIL;
1522
1523 *ccp = str;
1524
1525 return SUCCESS;
1526 }
1527
1528 /* Special meanings for indices (which have a range of 0-7), which will fit into
1529 a 4-bit integer. */
1530
1531 #define NEON_ALL_LANES 15
1532 #define NEON_INTERLEAVE_LANES 14
1533
/* Record a use of the given feature by merging it into the per-ISA
   used-feature set: thumb_arch_used when assembling Thumb code,
   arm_arch_used otherwise.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
1543
/* If the given feature is available in the selected CPU, mark it as
   used and return TRUE; otherwise return FALSE (setting inst.error for
   the MVE auto-detection case below).  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{

  /* Do not support the use of MVE only instructions when in auto-detection or
     -march=all.  */
  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
      && ARM_CPU_IS_ANY (cpu_variant))
    {
      first_error (BAD_MVE_AUTO);
      return FALSE;
    }
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     */
  record_feature_use (feature);

  return TRUE;
}
1568
/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL if the text at *CCP
   is not a register of the requested TYPE; on success advances *CCP.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" defaults.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE Q registers: only available with the MVE extension, and
     written with Q-register (REG_TYPE_NQ) syntax.  Registers above q7
     additionally require the D32 extension.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  /* Conversely, with MVE enabled a plain Neon Q-register request is
     rejected here.  */
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index attached to the register name itself.  */
  if (reg->neon)
    atype = *reg->neon;

  /* A ".<type>" suffix may follow the register name, but must not
     conflict with a type already attached to the name.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* "[<index>]" or "[]" (all lanes) makes this a scalar reference.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1701
1702 /* Like arm_reg_parse, but also allow the following extra features:
1703 - If RTYPE is non-zero, return the (possibly restricted) type of the
1704 register (e.g. Neon double or quad reg when either has been requested).
1705 - If this is a Neon vector type with additional type information, fill
1706 in the struct pointed to by VECTYPE (if non-NULL).
1707 This function will fault on encountering a scalar. */
1708
1709 static int
1710 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1711 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1712 {
1713 struct neon_typed_alias atype;
1714 char *str = *ccp;
1715 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1716
1717 if (reg == FAIL)
1718 return FAIL;
1719
1720 /* Do not allow regname(... to parse as a register. */
1721 if (*str == '(')
1722 return FAIL;
1723
1724 /* Do not allow a scalar (reg+index) to parse as a register. */
1725 if ((atype.defined & NTA_HASINDEX) != 0)
1726 {
1727 first_error (_("register operand expected, but got scalar"));
1728 return FAIL;
1729 }
1730
1731 if (vectype)
1732 *vectype = atype.eltype;
1733
1734 *ccp = str;
1735
1736 return reg;
1737 }
1738
1739 #define NEON_SCALAR_REG(X) ((X) >> 4)
1740 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1741
1742 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1743 have enough information to be able to do a good job bounds-checking. So, we
1744 just do easy checks here, and do further checks later. */
1745
1746 static int
1747 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1748 {
1749 int reg;
1750 char *str = *ccp;
1751 struct neon_typed_alias atype;
1752 enum arm_reg_type reg_type = REG_TYPE_VFD;
1753
1754 if (elsize == 4)
1755 reg_type = REG_TYPE_VFS;
1756
1757 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1758
1759 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1760 return FAIL;
1761
1762 if (atype.index == NEON_ALL_LANES)
1763 {
1764 first_error (_("scalar must have an index"));
1765 return FAIL;
1766 }
1767 else if (atype.index >= 64 / elsize)
1768 {
1769 first_error (_("scalar index out of range"));
1770 return FAIL;
1771 }
1772
1773 if (type)
1774 *type = atype.eltype;
1775
1776 *ccp = str;
1777
1778 return reg * 16 + atype.index;
1779 }
1780
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core (R) registers.  */
  REGLIST_CLRM,		/* CLRM list: r0-r12, lr and/or APSR.  */
  REGLIST_VFP_S,	/* VFP single-precision registers.  */
  REGLIST_VFP_S_VPR,	/* VFP single-precision registers plus VPR.  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_VFP_D_VPR,	/* VFP double-precision registers plus VPR.  */
  REGLIST_NEON_D	/* Neon D registers (Q names allowed for pairs).  */
};
1793
1794 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1795
1796 static long
1797 parse_reg_list (char ** strp, enum reg_list_els etype)
1798 {
1799 char *str = *strp;
1800 long range = 0;
1801 int another_range;
1802
1803 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1804
1805 /* We come back here if we get ranges concatenated by '+' or '|'. */
1806 do
1807 {
1808 skip_whitespace (str);
1809
1810 another_range = 0;
1811
1812 if (*str == '{')
1813 {
1814 int in_range = 0;
1815 int cur_reg = -1;
1816
1817 str++;
1818 do
1819 {
1820 int reg;
1821 const char apsr_str[] = "apsr";
1822 int apsr_str_len = strlen (apsr_str);
1823
1824 reg = arm_reg_parse (&str, REGLIST_RN);
1825 if (etype == REGLIST_CLRM)
1826 {
1827 if (reg == REG_SP || reg == REG_PC)
1828 reg = FAIL;
1829 else if (reg == FAIL
1830 && !strncasecmp (str, apsr_str, apsr_str_len)
1831 && !ISALPHA (*(str + apsr_str_len)))
1832 {
1833 reg = 15;
1834 str += apsr_str_len;
1835 }
1836
1837 if (reg == FAIL)
1838 {
1839 first_error (_("r0-r12, lr or APSR expected"));
1840 return FAIL;
1841 }
1842 }
1843 else /* etype == REGLIST_RN. */
1844 {
1845 if (reg == FAIL)
1846 {
1847 first_error (_(reg_expected_msgs[REGLIST_RN]));
1848 return FAIL;
1849 }
1850 }
1851
1852 if (in_range)
1853 {
1854 int i;
1855
1856 if (reg <= cur_reg)
1857 {
1858 first_error (_("bad range in register list"));
1859 return FAIL;
1860 }
1861
1862 for (i = cur_reg + 1; i < reg; i++)
1863 {
1864 if (range & (1 << i))
1865 as_tsktsk
1866 (_("Warning: duplicated register (r%d) in register list"),
1867 i);
1868 else
1869 range |= 1 << i;
1870 }
1871 in_range = 0;
1872 }
1873
1874 if (range & (1 << reg))
1875 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1876 reg);
1877 else if (reg <= cur_reg)
1878 as_tsktsk (_("Warning: register range not in ascending order"));
1879
1880 range |= 1 << reg;
1881 cur_reg = reg;
1882 }
1883 while (skip_past_comma (&str) != FAIL
1884 || (in_range = 1, *str++ == '-'));
1885 str--;
1886
1887 if (skip_past_char (&str, '}') == FAIL)
1888 {
1889 first_error (_("missing `}'"));
1890 return FAIL;
1891 }
1892 }
1893 else if (etype == REGLIST_RN)
1894 {
1895 expressionS exp;
1896
1897 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1898 return FAIL;
1899
1900 if (exp.X_op == O_constant)
1901 {
1902 if (exp.X_add_number
1903 != (exp.X_add_number & 0x0000ffff))
1904 {
1905 inst.error = _("invalid register mask");
1906 return FAIL;
1907 }
1908
1909 if ((range & exp.X_add_number) != 0)
1910 {
1911 int regno = range & exp.X_add_number;
1912
1913 regno &= -regno;
1914 regno = (1 << regno) - 1;
1915 as_tsktsk
1916 (_("Warning: duplicated register (r%d) in register list"),
1917 regno);
1918 }
1919
1920 range |= exp.X_add_number;
1921 }
1922 else
1923 {
1924 if (inst.relocs[0].type != 0)
1925 {
1926 inst.error = _("expression too complex");
1927 return FAIL;
1928 }
1929
1930 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1931 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1932 inst.relocs[0].pc_rel = 0;
1933 }
1934 }
1935
1936 if (*str == '|' || *str == '+')
1937 {
1938 str++;
1939 another_range = 1;
1940 }
1941 }
1942 while (another_range);
1943
1944 *strp = str;
1945 return range;
1946 }
1947
1948 /* Parse a VFP register list. If the string is invalid return FAIL.
1949 Otherwise return the number of registers, and set PBASE to the first
1950 register. Parses registers of type ETYPE.
1951 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1952 - Q registers can be used to specify pairs of D registers
1953 - { } can be omitted from around a singleton register list
1954 FIXME: This is not implemented, as it would require backtracking in
1955 some cases, e.g.:
1956 vtbl.8 d3,d4,d5
1957 This could be done (the meaning isn't really ambiguous), but doesn't
1958 fit in well with the current parsing framework.
1959 - 32 D registers may be used (also true for VFPv3).
1960 FIXME: Types are ignored in these register lists, which is probably a
1961 bug. */
1962
1963 static int
1964 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
1965 bfd_boolean *partial_match)
1966 {
1967 char *str = *ccp;
1968 int base_reg;
1969 int new_base;
1970 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1971 int max_regs = 0;
1972 int count = 0;
1973 int warned = 0;
1974 unsigned long mask = 0;
1975 int i;
1976 bfd_boolean vpr_seen = FALSE;
1977 bfd_boolean expect_vpr =
1978 (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);
1979
1980 if (skip_past_char (&str, '{') == FAIL)
1981 {
1982 inst.error = _("expecting {");
1983 return FAIL;
1984 }
1985
1986 switch (etype)
1987 {
1988 case REGLIST_VFP_S:
1989 case REGLIST_VFP_S_VPR:
1990 regtype = REG_TYPE_VFS;
1991 max_regs = 32;
1992 break;
1993
1994 case REGLIST_VFP_D:
1995 case REGLIST_VFP_D_VPR:
1996 regtype = REG_TYPE_VFD;
1997 break;
1998
1999 case REGLIST_NEON_D:
2000 regtype = REG_TYPE_NDQ;
2001 break;
2002
2003 default:
2004 gas_assert (0);
2005 }
2006
2007 if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
2008 {
2009 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2010 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
2011 {
2012 max_regs = 32;
2013 if (thumb_mode)
2014 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
2015 fpu_vfp_ext_d32);
2016 else
2017 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
2018 fpu_vfp_ext_d32);
2019 }
2020 else
2021 max_regs = 16;
2022 }
2023
2024 base_reg = max_regs;
2025 *partial_match = FALSE;
2026
2027 do
2028 {
2029 int setmask = 1, addregs = 1;
2030 const char vpr_str[] = "vpr";
2031 int vpr_str_len = strlen (vpr_str);
2032
2033 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
2034
2035 if (expect_vpr)
2036 {
2037 if (new_base == FAIL
2038 && !strncasecmp (str, vpr_str, vpr_str_len)
2039 && !ISALPHA (*(str + vpr_str_len))
2040 && !vpr_seen)
2041 {
2042 vpr_seen = TRUE;
2043 str += vpr_str_len;
2044 if (count == 0)
2045 base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
2046 }
2047 else if (vpr_seen)
2048 {
2049 first_error (_("VPR expected last"));
2050 return FAIL;
2051 }
2052 else if (new_base == FAIL)
2053 {
2054 if (regtype == REG_TYPE_VFS)
2055 first_error (_("VFP single precision register or VPR "
2056 "expected"));
2057 else /* regtype == REG_TYPE_VFD. */
2058 first_error (_("VFP/Neon double precision register or VPR "
2059 "expected"));
2060 return FAIL;
2061 }
2062 }
2063 else if (new_base == FAIL)
2064 {
2065 first_error (_(reg_expected_msgs[regtype]));
2066 return FAIL;
2067 }
2068
2069 *partial_match = TRUE;
2070 if (vpr_seen)
2071 continue;
2072
2073 if (new_base >= max_regs)
2074 {
2075 first_error (_("register out of range in list"));
2076 return FAIL;
2077 }
2078
2079 /* Note: a value of 2 * n is returned for the register Q<n>. */
2080 if (regtype == REG_TYPE_NQ)
2081 {
2082 setmask = 3;
2083 addregs = 2;
2084 }
2085
2086 if (new_base < base_reg)
2087 base_reg = new_base;
2088
2089 if (mask & (setmask << new_base))
2090 {
2091 first_error (_("invalid register list"));
2092 return FAIL;
2093 }
2094
2095 if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
2096 {
2097 as_tsktsk (_("register list not in ascending order"));
2098 warned = 1;
2099 }
2100
2101 mask |= setmask << new_base;
2102 count += addregs;
2103
2104 if (*str == '-') /* We have the start of a range expression */
2105 {
2106 int high_range;
2107
2108 str++;
2109
2110 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
2111 == FAIL)
2112 {
2113 inst.error = gettext (reg_expected_msgs[regtype]);
2114 return FAIL;
2115 }
2116
2117 if (high_range >= max_regs)
2118 {
2119 first_error (_("register out of range in list"));
2120 return FAIL;
2121 }
2122
2123 if (regtype == REG_TYPE_NQ)
2124 high_range = high_range + 1;
2125
2126 if (high_range <= new_base)
2127 {
2128 inst.error = _("register range not in ascending order");
2129 return FAIL;
2130 }
2131
2132 for (new_base += addregs; new_base <= high_range; new_base += addregs)
2133 {
2134 if (mask & (setmask << new_base))
2135 {
2136 inst.error = _("invalid register list");
2137 return FAIL;
2138 }
2139
2140 mask |= setmask << new_base;
2141 count += addregs;
2142 }
2143 }
2144 }
2145 while (skip_past_comma (&str) != FAIL);
2146
2147 str++;
2148
2149 /* Sanity check -- should have raised a parse error above. */
2150 if ((!vpr_seen && count == 0) || count > max_regs)
2151 abort ();
2152
2153 *pbase = base_reg;
2154
2155 if (expect_vpr && !vpr_seen)
2156 {
2157 first_error (_("VPR expected last"));
2158 return FAIL;
2159 }
2160
2161 /* Final test -- the registers must be consecutive. */
2162 mask >>= base_reg;
2163 for (i = 0; i < count; i++)
2164 {
2165 if ((mask & (1u << i)) == 0)
2166 {
2167 inst.error = _("non-contiguous register range");
2168 return FAIL;
2169 }
2170 }
2171
2172 *ccp = str;
2173
2174 return count;
2175 }
2176
2177 /* True if two alias types are the same. */
2178
2179 static bfd_boolean
2180 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2181 {
2182 if (!a && !b)
2183 return TRUE;
2184
2185 if (!a || !b)
2186 return FALSE;
2187
2188 if (a->defined != b->defined)
2189 return FALSE;
2190
2191 if ((a->defined & NTA_HASTYPE) != 0
2192 && (a->eltype.type != b->eltype.type
2193 || a->eltype.size != b->eltype.size))
2194 return FALSE;
2195
2196 if ((a->defined & NTA_HASINDEX) != 0
2197 && (a->index != b->index))
2198 return FALSE;
2199
2200 return TRUE;
2201 }
2202
2203 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2204 The base register is put in *PBASE.
2205 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2206 the return value.
2207 The register stride (minus one) is put in bit 4 of the return value.
2208 Bits [6:5] encode the list length (minus one).
2209 The type of the list elements is put in *ELTYPE, if non-NULL. */
2210
/* Extract the lane number (bits [3:0]) from a parsed list encoding.  */
#define NEON_LANE(X)		((X) & 0xf)
/* Extract the register stride, 1 or 2, from bit 4 of the encoding.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
/* Extract the list length, 1 to 4, from bits [6:5] of the encoding.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list.  */
  int reg_incr = -1;	/* Stride between registers; -1 until deduced.  */
  int count = 0;	/* Number of (D-sized) registers seen so far.  */
  int lane = -1;	/* Lane index or NEON_*_LANES; -1 until known.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      /* MVE vector lists only ever contain Q registers.  */
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: record it and its type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: deduce the stride from the first pair.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must keep the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every element must carry the same type/index as the first one.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range (a Q register
	     counts as two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* An indexed scalar: all elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A single-register list has no second register to deduce a stride
     from; default to 1.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described by the NEON_* accessor
     macros above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2375
2376 /* Parse an explicit relocation suffix on an expression. This is
2377 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2378 arm_reloc_hsh contains no entries, so this function can only
2379 succeed if there is no () after the word. Returns -1 on error,
2380 BFD_RELOC_UNUSED if there wasn't any suffix. */
2381
2382 static int
2383 parse_reloc (char **str)
2384 {
2385 struct reloc_entry *r;
2386 char *p, *q;
2387
2388 if (**str != '(')
2389 return BFD_RELOC_UNUSED;
2390
2391 p = *str + 1;
2392 q = p;
2393
2394 while (*q && *q != ')' && *q != ',')
2395 q++;
2396 if (*q != ')')
2397 return -1;
2398
2399 if ((r = (struct reloc_entry *)
2400 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2401 return -1;
2402
2403 *str = q + 1;
2404 return r->reloc;
2405 }
2406
2407 /* Directives: register aliases. */
2408
2409 static struct reg_entry *
2410 insert_reg_alias (char *str, unsigned number, int type)
2411 {
2412 struct reg_entry *new_reg;
2413 const char *name;
2414
2415 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2416 {
2417 if (new_reg->builtin)
2418 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2419
2420 /* Only warn about a redefinition if it's not defined as the
2421 same register. */
2422 else if (new_reg->number != number || new_reg->type != type)
2423 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2424
2425 return NULL;
2426 }
2427
2428 name = xstrdup (str);
2429 new_reg = XNEW (struct reg_entry);
2430
2431 new_reg->name = name;
2432 new_reg->number = number;
2433 new_reg->type = type;
2434 new_reg->builtin = FALSE;
2435 new_reg->neon = NULL;
2436
2437 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2438 abort ();
2439
2440 return new_reg;
2441 }
2442
2443 static void
2444 insert_neon_reg_alias (char *str, int number, int type,
2445 struct neon_typed_alias *atype)
2446 {
2447 struct reg_entry *reg = insert_reg_alias (str, number, type);
2448
2449 if (!reg)
2450 {
2451 first_error (_("attempt to redefine typed alias"));
2452 return;
2453 }
2454
2455 if (atype)
2456 {
2457 reg->neon = XNEW (struct neon_typed_alias);
2458 *reg->neon = *atype;
2459 }
2460 }
2461
2462 /* Look for the .req directive. This is of the form:
2463
2464 new_register_name .req existing_register_name
2465
2466 If we find one, or if it looks sufficiently like one that we want to
2467 handle any error here, return TRUE. Otherwise return FALSE. */
2468
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The alias target must already be a known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return TRUE: the statement was recognizably a .req, so
	 the caller should not try to parse it as anything else.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case variant when it differs from the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2541
2542 /* Create a Neon typed/indexed register alias using directives, e.g.:
2543 X .dn d5.s32[1]
2544 Y .qn 6.s16
2545 Z .dn d7
2546 T .dn Z[0]
2547 These typed registers can be used instead of the types specified after the
2548 Neon mnemonic, so long as all operands given have types. Types can also be
2549 specified directly, e.g.:
2550 vadd d0.s32, d1.s32, d2.s32 */
2551
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* ".dn" creates a D-register alias, ".qn" a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  /* The base must match the directive: D register for .dn, Q for .qn.  */
  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q<n> is recorded using D-register numbering, hence * 2.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index info already attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2690
2691 /* Should never be called, as .req goes between the alias and the
2692 register name, not at the beginning of the line. */
2693
/* Handler for a bare ".req" directive: since .req must appear between
   the alias name and the register name, reaching this handler means the
   syntax was wrong.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2699
/* Handler for a bare ".dn" directive: .dn must follow the alias name,
   so reaching this handler means the syntax was wrong.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2705
/* Handler for a bare ".qn" directive: .qn must follow the alias name,
   so reaching this handler means the syntax was wrong.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2711
2712 /* The .unreq directive deletes an alias which was previously defined
2713 by .req. For example:
2714
2715 my_alias .req r11
2716 .unreq my_alias */
2717
2718 static void
2719 s_unreq (int a ATTRIBUTE_UNUSED)
2720 {
2721 char * name;
2722 char saved_char;
2723
2724 name = input_line_pointer;
2725
2726 while (*input_line_pointer != 0
2727 && *input_line_pointer != ' '
2728 && *input_line_pointer != '\n')
2729 ++input_line_pointer;
2730
2731 saved_char = *input_line_pointer;
2732 *input_line_pointer = 0;
2733
2734 if (!*name)
2735 as_bad (_("invalid syntax for .unreq directive"));
2736 else
2737 {
2738 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2739 name);
2740
2741 if (!reg)
2742 as_bad (_("unknown register alias '%s'"), name);
2743 else if (reg->builtin)
2744 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2745 name);
2746 else
2747 {
2748 char * p;
2749 char * nbuf;
2750
2751 hash_delete (arm_reg_hsh, name, FALSE);
2752 free ((char *) reg->name);
2753 if (reg->neon)
2754 free (reg->neon);
2755 free (reg);
2756
2757 /* Also locate the all upper case and all lower case versions.
2758 Do not complain if we cannot find one or the other as it
2759 was probably deleted above. */
2760
2761 nbuf = strdup (name);
2762 for (p = nbuf; *p; p++)
2763 *p = TOUPPER (*p);
2764 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2765 if (reg)
2766 {
2767 hash_delete (arm_reg_hsh, nbuf, FALSE);
2768 free ((char *) reg->name);
2769 if (reg->neon)
2770 free (reg->neon);
2771 free (reg);
2772 }
2773
2774 for (p = nbuf; *p; p++)
2775 *p = TOLOWER (*p);
2776 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2777 if (reg)
2778 {
2779 hash_delete (arm_reg_hsh, nbuf, FALSE);
2780 free ((char *) reg->name);
2781 if (reg->neon)
2782 free (reg->neon);
2783 free (reg);
2784 }
2785
2786 free (nbuf);
2787 }
2788 }
2789
2790 *input_line_pointer = saved_char;
2791 demand_empty_rest_of_line ();
2792 }
2793
2794 /* Directives: Instruction set selection. */
2795
2796 #ifdef OBJ_ELF
2797 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2798 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2799 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2800 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2801
2802 /* Create a new mapping symbol for the transition to STATE. */
2803
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb-ness of the symbol for interworking support.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one replaces the old.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2877
2878 /* We must sometimes convert a region marked as code to data during
2879 code alignment, if an odd number of bytes have to be padded. The
2880 code mapping symbol is pushed to an aligned address. */
2881
2882 static void
2883 insert_data_mapping_symbol (enum mstate state,
2884 valueT value, fragS *frag, offsetT bytes)
2885 {
2886 /* If there was already a mapping symbol, remove it. */
2887 if (frag->tc_frag_data.last_map != NULL
2888 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2889 {
2890 symbolS *symp = frag->tc_frag_data.last_map;
2891
2892 if (value == 0)
2893 {
2894 know (frag->tc_frag_data.first_map == symp);
2895 frag->tc_frag_data.first_map = NULL;
2896 }
2897 frag->tc_frag_data.last_map = NULL;
2898 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2899 }
2900
2901 make_mapping_symbol (MAP_DATA, value, frag);
2902 make_mapping_symbol (state, value + bytes, frag);
2903 }
2904
2905 static void mapping_state_2 (enum mstate state, int max_chars);
2906
2907 /* Set the mapping state to STATE. Only call this when about to
2908 emit some STATE bytes to the file. */
2909
2910 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current output location.  */
  mapping_state_2 (state, 0);
}
2943
2944 /* Same as mapping_state, but MAX_CHARS bytes have already been
2945 allocated. Put the mapping symbol that far back. */
2946
2947 static void
2948 mapping_state_2 (enum mstate state, int max_chars)
2949 {
2950 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2951
2952 if (!SEG_NORMAL (now_seg))
2953 return;
2954
2955 if (mapstate == state)
2956 /* The mapping symbol has already been emitted.
2957 There is nothing else to do. */
2958 return;
2959
2960 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2961 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2962 {
2963 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2964 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2965
2966 if (add_symbol)
2967 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2968 }
2969
2970 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2971 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2972 }
2973 #undef TRANSITION
2974 #else
2975 #define mapping_state(x) ((void)0)
2976 #define mapping_state_2(x, y) ((void)0)
2977 #endif
2978
2979 /* Find the real, Thumb encoded start of a Thumb function. */
2980
2981 #ifdef OBJ_COFF
2982 static symbolS *
2983 find_real_start (symbolS * symbolP)
2984 {
2985 char * real_start;
2986 const char * name = S_GET_NAME (symbolP);
2987 symbolS * new_target;
2988
2989 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2990 #define STUB_NAME ".real_start_of"
2991
2992 if (name == NULL)
2993 abort ();
2994
2995 /* The compiler may generate BL instructions to local labels because
2996 it needs to perform a branch to a far away location. These labels
2997 do not have a corresponding ".real_start_of" label. We check
2998 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2999 the ".real_start_of" convention for nonlocal branches. */
3000 if (S_IS_LOCAL (symbolP) || name[0] == '.')
3001 return symbolP;
3002
3003 real_start = concat (STUB_NAME, name, NULL);
3004 new_target = symbol_find (real_start);
3005 free (real_start);
3006
3007 if (new_target == NULL)
3008 {
3009 as_warn (_("Failed to find real start of function: %s\n"), name);
3010 new_target = symbolP;
3011 }
3012
3013 return new_target;
3014 }
3015 #endif
3016
3017 static void
3018 opcode_select (int width)
3019 {
3020 switch (width)
3021 {
3022 case 16:
3023 if (! thumb_mode)
3024 {
3025 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3026 as_bad (_("selected processor does not support THUMB opcodes"));
3027
3028 thumb_mode = 1;
3029 /* No need to force the alignment, since we will have been
3030 coming from ARM mode, which is word-aligned. */
3031 record_alignment (now_seg, 1);
3032 }
3033 break;
3034
3035 case 32:
3036 if (thumb_mode)
3037 {
3038 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3039 as_bad (_("selected processor does not support ARM opcodes"));
3040
3041 thumb_mode = 0;
3042
3043 if (!need_pass_2)
3044 frag_align (2, 0, 0);
3045
3046 record_alignment (now_seg, 1);
3047 }
3048 break;
3049
3050 default:
3051 as_bad (_("invalid instruction size selected (%d)"), width);
3052 }
3053 }
3054
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3061
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3068
3069 static void
3070 s_code (int unused ATTRIBUTE_UNUSED)
3071 {
3072 int temp;
3073
3074 temp = get_absolute_expression ();
3075 switch (temp)
3076 {
3077 case 16:
3078 case 32:
3079 opcode_select (temp);
3080 break;
3081
3082 default:
3083 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3084 }
3085 }
3086
/* Handle the .force_thumb directive: switch to Thumb mode without
   checking whether the selected processor supports it.  */
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* Use 2 rather than the 1 set by opcode_select, marking the mode
	 as forced.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3103
/* Handle the .thumb_func directive: switch to Thumb encoding and mark
   the next label as the start of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Behave exactly like .thumb first.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3113
3114 /* Perform a .set directive, but also mark the alias as
3115 being a thumb function. */
3116
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the error message.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* With EQUIV set, redefining an already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3202
3203 /* Directives: Mode selection. */
3204
3205 /* .syntax [unified|divided] - choose the new unified syntax
3206 (same for Arm and Thumb encoding, modulo slight differences in what
3207 can be represented) or the old divergent syntax for each mode. */
3208 static void
3209 s_syntax (int unused ATTRIBUTE_UNUSED)
3210 {
3211 char *name, delim;
3212
3213 delim = get_symbol_name (& name);
3214
3215 if (!strcasecmp (name, "unified"))
3216 unified_syntax = TRUE;
3217 else if (!strcasecmp (name, "divided"))
3218 unified_syntax = FALSE;
3219 else
3220 {
3221 as_bad (_("unrecognized syntax mode \"%s\""), name);
3222 return;
3223 }
3224 (void) restore_line_pointer (delim);
3225 demand_empty_rest_of_line ();
3226 }
3227
3228 /* Directives: sectioning and alignment. */
3229
/* Handle the .bss directive: switch the current output to the BSS
   section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3242
/* Handle the .even directive: align the output to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3254
3255 /* Directives: CodeComposer Studio. */
3256
3257 /* .ref (for CodeComposer Studio syntax only). */
3258 static void
3259 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3260 {
3261 if (codecomposer_syntax)
3262 ignore_rest_of_line ();
3263 else
3264 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3265 }
3266
3267 /* If name is not NULL, then it is used for marking the beginning of a
3268 function, whereas if it is NULL then it means the function end. */
3269 static void
3270 asmfunc_debug (const char * name)
3271 {
3272 static const char * last_name = NULL;
3273
3274 if (name != NULL)
3275 {
3276 gas_assert (last_name == NULL);
3277 last_name = name;
3278
3279 if (debug_type == DEBUG_STABS)
3280 stabs_generate_asm_func (name, name);
3281 }
3282 else
3283 {
3284 gas_assert (last_name != NULL);
3285
3286 if (debug_type == DEBUG_STABS)
3287 stabs_generate_asm_endfunc (last_name, last_name);
3288
3289 last_name = NULL;
3290 }
3291 }
3292
3293 static void
3294 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3295 {
3296 if (codecomposer_syntax)
3297 {
3298 switch (asmfunc_state)
3299 {
3300 case OUTSIDE_ASMFUNC:
3301 asmfunc_state = WAITING_ASMFUNC_NAME;
3302 break;
3303
3304 case WAITING_ASMFUNC_NAME:
3305 as_bad (_(".asmfunc repeated."));
3306 break;
3307
3308 case WAITING_ENDASMFUNC:
3309 as_bad (_(".asmfunc without function."));
3310 break;
3311 }
3312 demand_empty_rest_of_line ();
3313 }
3314 else
3315 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3316 }
3317
3318 static void
3319 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3320 {
3321 if (codecomposer_syntax)
3322 {
3323 switch (asmfunc_state)
3324 {
3325 case OUTSIDE_ASMFUNC:
3326 as_bad (_(".endasmfunc without a .asmfunc."));
3327 break;
3328
3329 case WAITING_ASMFUNC_NAME:
3330 as_bad (_(".endasmfunc without function."));
3331 break;
3332
3333 case WAITING_ENDASMFUNC:
3334 asmfunc_state = OUTSIDE_ASMFUNC;
3335 asmfunc_debug (NULL);
3336 break;
3337 }
3338 demand_empty_rest_of_line ();
3339 }
3340 else
3341 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3342 }
3343
3344 static void
3345 s_ccs_def (int name)
3346 {
3347 if (codecomposer_syntax)
3348 s_globl (name);
3349 else
3350 as_bad (_(".def pseudo-op only available with -mccs flag."));
3351 }
3352
3353 /* Directives: Literal pools. */
3354
3355 static literal_pool *
3356 find_literal_pool (void)
3357 {
3358 literal_pool * pool;
3359
3360 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3361 {
3362 if (pool->section == now_seg
3363 && pool->sub_section == now_subseg)
3364 break;
3365 }
3366
3367 return pool;
3368 }
3369
3370 static literal_pool *
3371 find_or_make_literal_pool (void)
3372 {
3373 /* Next literal pool ID number. */
3374 static unsigned int latest_pool_num = 1;
3375 literal_pool * pool;
3376
3377 pool = find_literal_pool ();
3378
3379 if (pool == NULL)
3380 {
3381 /* Create a new pool. */
3382 pool = XNEW (literal_pool);
3383 if (! pool)
3384 return NULL;
3385
3386 pool->next_free_entry = 0;
3387 pool->section = now_seg;
3388 pool->sub_section = now_subseg;
3389 pool->next = list_of_pools;
3390 pool->symbol = NULL;
3391 pool->alignment = 2;
3392
3393 /* Add it to the list. */
3394 list_of_pools = pool;
3395 }
3396
3397 /* New pools, and emptied pools, will have a NULL symbol. */
3398 if (pool->symbol == NULL)
3399 {
3400 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3401 (valueT) 0, &zero_address_frag);
3402 pool->id = latest_pool_num ++;
3403 }
3404
3405 /* Done. */
3406 return pool;
3407 }
3408
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is the size of the
   literal (4 or 8).  On success the instruction's first reloc is
   rewritten to reference the pool symbol plus the entry's byte offset;
   returns SUCCESS, or FAIL with inst.error set.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is handled as two 4-byte words, imm1 (low) and
     imm2 (high), swapped for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal must match a pair of consecutive 4-byte
	 entries at an 8-byte-aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot (inserted below to align 8-byte entries) can be
	 reused for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Not 8-byte aligned: insert a 4-byte padding slot first
		 (it needs its own free entry).  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as separate constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  /* The pool now needs 8-byte (2^3) alignment.  */
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's reloc to point at the pool entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3573
/* Hook invoked when a label without a trailing colon is seen.  In
   CodeComposer syntax, while waiting for an .asmfunc name, the label
   names the function.  Returns FALSE if the label is invalid.  */
bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* Scan backwards to the start of the label: it begins just
	 after the previous end-of-line character.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3599
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Fill in a previously-created symbol SYMBOLP with NAME, SEGMENT,
   value VALU and fragment FRAG, then append it to the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its storage.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3650
/* Handle the .ltorg directive: dump the current literal pool at this
   point in the output and mark the pool empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Pool contents are data, not code: switch the mapping state.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* Build the pool's label; the \002 byte cannot appear in user
     source, presumably keeping the name out of the user's namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3702
3703 #ifdef OBJ_ELF
3704 /* Forward declarations for functions below, in the MD interface
3705 section. */
3706 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3707 static valueT create_unwind_entry (int);
3708 static void start_unwind_section (const segT, int);
3709 static void add_unwind_opcode (valueT, int);
3710 static void flush_pending_unwind (void);
3711
3712 /* Directives: Data. */
3713
/* Handle a data directive emitting NBYTES per value, allowing an
   optional relocation suffix (parsed by parse_reloc) after symbolic
   operands.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Symbolic value: look for a relocation suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix: emit the expression directly.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the input, splice the suffix out of the line,
		     re-parse the whole expression, then restore.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup in the least significant SIZE bytes
		     of the NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3808
3809 /* Emit an expression containing a 32-bit thumb instruction.
3810 Implementation based on put_thumb32_insn. */
3811
3812 static void
3813 emit_thumb32_expr (expressionS * exp)
3814 {
3815 expressionS exp_high = *exp;
3816
3817 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3818 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3819 exp->X_add_number &= 0xffff;
3820 emit_expr (exp, (unsigned int) THUMB_SIZE);
3821 }
3822
/* Guess the instruction size based on the opcode.  Return 2 for a
   16-bit encoding, 4 for a 32-bit encoding, or 0 when the size cannot
   be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;

  return value >= 0xe8000000u ? 4 : 0;
}
3835
/* Emit the constant expression EXP as a hand-encoded instruction of
   NBYTES bytes; NBYTES == 0 means deduce the size from the opcode
   (Thumb).  Returns TRUE if an instruction was emitted.  */
static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine in step with
		 this hand-encoded instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* A 32-bit Thumb instruction is emitted as two
		 halfwords on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3880
3881 /* Like s_arm_elf_cons but do not use md_cons_align and
3882 set the mapping state to MAP_ARM/MAP_THUMB. */
3883
3884 static void
3885 s_arm_elf_inst (int nbytes)
3886 {
3887 if (is_it_end_of_statement ())
3888 {
3889 demand_empty_rest_of_line ();
3890 return;
3891 }
3892
3893 /* Calling mapping_state () here will not change ARM/THUMB,
3894 but will ensure not to be in DATA state. */
3895
3896 if (thumb_mode)
3897 mapping_state (MAP_THUMB);
3898 else
3899 {
3900 if (nbytes != 0)
3901 {
3902 as_bad (_("width suffixes are invalid in ARM mode"));
3903 ignore_rest_of_line ();
3904 return;
3905 }
3906
3907 nbytes = 4;
3908
3909 mapping_state (MAP_ARM);
3910 }
3911
3912 do
3913 {
3914 expressionS exp;
3915
3916 expression (& exp);
3917
3918 if (! emit_insn (& exp, nbytes))
3919 {
3920 ignore_rest_of_line ();
3921 return;
3922 }
3923 }
3924 while (*input_line_pointer++ == ',');
3925
3926 /* Put terminator back into stream. */
3927 input_line_pointer --;
3928 demand_empty_rest_of_line ();
3929 }
3930
/* Parse a .rel31 directive.  The first operand (0 or 1) supplies the
   top bit of the emitted word; the remaining 31 bits come from a
   PREL31 relocation against the second operand.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the PREL31 fixup fills in the rest.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3970
3971 /* Directives: AEABI stack-unwind tables. */
3972
/* Parse an unwind_fnstart directive.  Simply records the current location.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.	 */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1 == no personality given.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3999
4000
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* NOTE(review): argument semantics defined by create_unwind_entry
     (not visible here).  */
  create_unwind_entry (1);
}
4016
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* Zero-size BFD_RELOC_NONE fix: records the symbol dependency
	 without emitting any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4086
4087
/* Parse an unwind_cantunwind directive.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* A cantunwind frame cannot also have a personality routine.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 is the internal marker for a cantunwind frame.  */
  unwind.personality_index = -2;
}
4102
4103
/* Parse a personalityindex directive.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality (routine or index) may be given per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a constant in [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
4131
4132
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality (routine or index) may be given per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the character clobbered by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4154
4155
/* Parse a directive saving core registers.  Parses the register list
   and emits the matching unwind opcodes (short or long form).  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;		/* Bitmask of saved registers r0..r15.  */
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;	/* ip (r12) -> sp (r13).  */
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4231
4232
/* Parse a directive saving FPA registers.  REG selects the opcode
   form; presumably it is the first register saved — confirm against
   the caller.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* 12 bytes per FPA register.  */
  unwind.frame_size += num_regs * 12;
}
4280
4281
/* Parse a directive saving VFP registers for ARMv6 and above.  Emits
   separate opcodes for the D0-D15 and D16-D31 halves of the list.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* 8 bytes per D register.  */
  unwind.frame_size += count * 8;
}
4332
4333
/* Parse a directive saving VFP registers for pre-ARMv6.  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;		/* First register of the list.  */
  valueT op;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* 8 bytes per D register; NOTE(review): the extra 4 bytes presumably
     account for the FSTMX format word — confirm.  */
  unwind.frame_size += count * 8 + 4;
}
4369
4370
/* Parse a directive saving iWMMXt data registers.  Builds a mask of
   saved wR registers, optionally merges with previously emitted
   opcodes, then emits opcodes for each contiguous block.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;		/* Bitmask of saved wR0..wR15.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* A register at or below one already seen is out of order.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      /* Handle a range such as wr4-wr10.  */
      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* 8 bytes per saved wR register.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short form (wr10..wr10+i); fold
		 those registers into our mask and drop the opcode.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was a two-byte long form; decode its
		 start register and count from the preceding byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4504
4505 static void
4506 s_arm_unwind_save_mmxwcg (void)
4507 {
4508 int reg;
4509 int hi_reg;
4510 unsigned mask = 0;
4511 valueT op;
4512
4513 if (*input_line_pointer == '{')
4514 input_line_pointer++;
4515
4516 skip_whitespace (input_line_pointer);
4517
4518 do
4519 {
4520 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4521
4522 if (reg == FAIL)
4523 {
4524 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4525 goto error;
4526 }
4527
4528 reg -= 8;
4529 if (mask >> reg)
4530 as_tsktsk (_("register list not in ascending order"));
4531 mask |= 1 << reg;
4532
4533 if (*input_line_pointer == '-')
4534 {
4535 input_line_pointer++;
4536 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4537 if (hi_reg == FAIL)
4538 {
4539 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4540 goto error;
4541 }
4542 else if (reg >= hi_reg)
4543 {
4544 as_bad (_("bad register range"));
4545 goto error;
4546 }
4547 for (; reg < hi_reg; reg++)
4548 mask |= 1 << reg;
4549 }
4550 }
4551 while (skip_past_comma (&input_line_pointer) != FAIL);
4552
4553 skip_past_char (&input_line_pointer, '}');
4554
4555 demand_empty_rest_of_line ();
4556
4557 /* Generate any deferred opcodes because we're going to be looking at
4558 the list. */
4559 flush_pending_unwind ();
4560
4561 for (reg = 0; reg < 16; reg++)
4562 {
4563 if (mask & (1 << reg))
4564 unwind.frame_size += 4;
4565 }
4566 op = 0xc700 | mask;
4567 add_unwind_opcode (op, 2);
4568 return;
4569 error:
4570 ignore_rest_of_line ();
4571 }
4572
4573
4574 /* Parse an unwind_save directive.
4575 If the argument is non-zero, this is a .vsave directive. */
4576
4577 static void
4578 s_arm_unwind_save (int arch_v6)
4579 {
4580 char *peek;
4581 struct reg_entry *reg;
4582 bfd_boolean had_brace = FALSE;
4583
4584 if (!unwind.proc_start)
4585 as_bad (MISSING_FNSTART);
4586
4587 /* Figure out what sort of save we have. */
4588 peek = input_line_pointer;
4589
4590 if (*peek == '{')
4591 {
4592 had_brace = TRUE;
4593 peek++;
4594 }
4595
4596 reg = arm_reg_parse_multi (&peek);
4597
4598 if (!reg)
4599 {
4600 as_bad (_("register expected"));
4601 ignore_rest_of_line ();
4602 return;
4603 }
4604
4605 switch (reg->type)
4606 {
4607 case REG_TYPE_FN:
4608 if (had_brace)
4609 {
4610 as_bad (_("FPA .unwind_save does not take a register list"));
4611 ignore_rest_of_line ();
4612 return;
4613 }
4614 input_line_pointer = peek;
4615 s_arm_unwind_save_fpa (reg->number);
4616 return;
4617
4618 case REG_TYPE_RN:
4619 s_arm_unwind_save_core ();
4620 return;
4621
4622 case REG_TYPE_VFD:
4623 if (arch_v6)
4624 s_arm_unwind_save_vfp_armv6 ();
4625 else
4626 s_arm_unwind_save_vfp ();
4627 return;
4628
4629 case REG_TYPE_MMXWR:
4630 s_arm_unwind_save_mmxwr ();
4631 return;
4632
4633 case REG_TYPE_MMXWCG:
4634 s_arm_unwind_save_mmxwcg ();
4635 return;
4636
4637 default:
4638 as_bad (_(".unwind_save does not support this kind of register"));
4639 ignore_rest_of_line ();
4640 }
4641 }
4642
4643
4644 /* Parse an unwind_movsp directive. */
4645
4646 static void
4647 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4648 {
4649 int reg;
4650 valueT op;
4651 int offset;
4652
4653 if (!unwind.proc_start)
4654 as_bad (MISSING_FNSTART);
4655
4656 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4657 if (reg == FAIL)
4658 {
4659 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4660 ignore_rest_of_line ();
4661 return;
4662 }
4663
4664 /* Optional constant. */
4665 if (skip_past_comma (&input_line_pointer) != FAIL)
4666 {
4667 if (immediate_for_directive (&offset) == FAIL)
4668 return;
4669 }
4670 else
4671 offset = 0;
4672
4673 demand_empty_rest_of_line ();
4674
4675 if (reg == REG_SP || reg == REG_PC)
4676 {
4677 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4678 return;
4679 }
4680
4681 if (unwind.fp_reg != REG_SP)
4682 as_bad (_("unexpected .unwind_movsp directive"));
4683
4684 /* Generate opcode to restore the value. */
4685 op = 0x90 | reg;
4686 add_unwind_opcode (op, 1);
4687
4688 /* Record the information for later. */
4689 unwind.fp_reg = reg;
4690 unwind.fp_offset = unwind.frame_size - offset;
4691 unwind.sp_restored = 1;
4692 }
4693
4694 /* Parse an unwind_pad directive. */
4695
4696 static void
4697 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4698 {
4699 int offset;
4700
4701 if (!unwind.proc_start)
4702 as_bad (MISSING_FNSTART);
4703
4704 if (immediate_for_directive (&offset) == FAIL)
4705 return;
4706
4707 if (offset & 3)
4708 {
4709 as_bad (_("stack increment must be multiple of 4"));
4710 ignore_rest_of_line ();
4711 return;
4712 }
4713
4714 /* Don't generate any opcodes, just record the details for later. */
4715 unwind.frame_size += offset;
4716 unwind.pending_offset += offset;
4717
4718 demand_empty_rest_of_line ();
4719 }
4720
4721 /* Parse an unwind_setfp directive. */
4722
4723 static void
4724 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4725 {
4726 int sp_reg;
4727 int fp_reg;
4728 int offset;
4729
4730 if (!unwind.proc_start)
4731 as_bad (MISSING_FNSTART);
4732
4733 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4734 if (skip_past_comma (&input_line_pointer) == FAIL)
4735 sp_reg = FAIL;
4736 else
4737 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4738
4739 if (fp_reg == FAIL || sp_reg == FAIL)
4740 {
4741 as_bad (_("expected <reg>, <reg>"));
4742 ignore_rest_of_line ();
4743 return;
4744 }
4745
4746 /* Optional constant. */
4747 if (skip_past_comma (&input_line_pointer) != FAIL)
4748 {
4749 if (immediate_for_directive (&offset) == FAIL)
4750 return;
4751 }
4752 else
4753 offset = 0;
4754
4755 demand_empty_rest_of_line ();
4756
4757 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4758 {
4759 as_bad (_("register must be either sp or set by a previous"
4760 "unwind_movsp directive"));
4761 return;
4762 }
4763
4764 /* Don't generate any opcodes, just record the information for later. */
4765 unwind.fp_reg = fp_reg;
4766 unwind.fp_used = 1;
4767 if (sp_reg == REG_SP)
4768 unwind.fp_offset = unwind.frame_size - offset;
4769 else
4770 unwind.fp_offset -= offset;
4771 }
4772
4773 /* Parse an unwind_raw directive. */
4774
4775 static void
4776 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4777 {
4778 expressionS exp;
4779 /* This is an arbitrary limit. */
4780 unsigned char op[16];
4781 int count;
4782
4783 if (!unwind.proc_start)
4784 as_bad (MISSING_FNSTART);
4785
4786 expression (&exp);
4787 if (exp.X_op == O_constant
4788 && skip_past_comma (&input_line_pointer) != FAIL)
4789 {
4790 unwind.frame_size += exp.X_add_number;
4791 expression (&exp);
4792 }
4793 else
4794 exp.X_op = O_illegal;
4795
4796 if (exp.X_op != O_constant)
4797 {
4798 as_bad (_("expected <offset>, <opcode>"));
4799 ignore_rest_of_line ();
4800 return;
4801 }
4802
4803 count = 0;
4804
4805 /* Parse the opcode. */
4806 for (;;)
4807 {
4808 if (count >= 16)
4809 {
4810 as_bad (_("unwind opcode too long"));
4811 ignore_rest_of_line ();
4812 }
4813 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4814 {
4815 as_bad (_("invalid unwind opcode"));
4816 ignore_rest_of_line ();
4817 return;
4818 }
4819 op[count++] = exp.X_add_number;
4820
4821 /* Parse the next byte. */
4822 if (skip_past_comma (&input_line_pointer) == FAIL)
4823 break;
4824
4825 expression (&exp);
4826 }
4827
4828 /* Add the opcode bytes in reverse order. */
4829 while (count--)
4830 add_unwind_opcode (op[count], 1);
4831
4832 demand_empty_rest_of_line ();
4833 }
4834
4835
4836 /* Parse a .eabi_attribute directive. */
4837
4838 static void
4839 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4840 {
4841 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4842
4843 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4844 attributes_set_explicitly[tag] = 1;
4845 }
4846
/* Emit a tls fix for the symbol.  Parses one expression from the input
   and attaches a 4-byte TLS descriptor-sequence relocation to it at the
   current output position.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Current free position in the frag's obstack = where the fix goes.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  /* Select the Thumb or ARM variant of the reloc based on the current
     instruction-set mode.  */
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4870 #endif /* OBJ_ELF */
4871
4872 static void s_arm_arch (int);
4873 static void s_arm_object_arch (int);
4874 static void s_arm_cpu (int);
4875 static void s_arm_fpu (int);
4876 static void s_arm_arch_extension (int);
4877
4878 #ifdef TE_PE
4879
4880 static void
4881 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4882 {
4883 expressionS exp;
4884
4885 do
4886 {
4887 expression (&exp);
4888 if (exp.X_op == O_symbol)
4889 exp.X_op = O_secrel;
4890
4891 emit_expr (&exp, 4);
4892 }
4893 while (*input_line_pointer++ == ',');
4894
4895 input_line_pointer--;
4896 demand_empty_rest_of_line ();
4897 }
4898 #endif /* TE_PE */
4899
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only directives, including the ARM EHABI unwind-table family
     (.fnstart ... .fnend).  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* .save and .vsave share a handler; the argument distinguishes them.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4979 \f
4980 /* Parser functions used exclusively in instruction operands. */
4981
4982 /* Generic immediate-value read function for use in insn parsing.
4983 STR points to the beginning of the immediate (the leading #);
4984 VAL receives the value; if the value is outside [MIN, MAX]
4985 issue an error. PREFIX_OPT is true if the immediate prefix is
4986 optional. */
4987
4988 static int
4989 parse_immediate (char **str, int *val, int min, int max,
4990 bfd_boolean prefix_opt)
4991 {
4992 expressionS exp;
4993
4994 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4995 if (exp.X_op != O_constant)
4996 {
4997 inst.error = _("constant expression required");
4998 return FAIL;
4999 }
5000
5001 if (exp.X_add_number < min || exp.X_add_number > max)
5002 {
5003 inst.error = _("immediate value out of range");
5004 return FAIL;
5005 }
5006
5007 *val = exp.X_add_number;
5008 return SUCCESS;
5009 }
5010
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and, for 64-bit values, the high 32 bits in .reg with
   .regisimm set.  IN_EXP, if non-NULL, receives the parsed expression;
   ALLOW_SYMBOL_P permits a bare symbol operand.  Returns SUCCESS/FAIL;
   on success *STR is advanced past the immediate.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5083
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The FPA encodes only a
   fixed set of float constants (fp_const / fp_values); a match at index
   I is reported as pseudo-register I + 8.  On success *STR is advanced
   past the constant.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing text after the match: not this constant after
	     all, so back out.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5176
/* Returns nonzero if IMM has the "quarter-precision" float bit pattern
   0baBbbbbbc defgh000 00000000 00000000, where B == NOT (b).  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 30-25 must be 0b011111 when exponent bit 29 is set, or
     0b100000 when it is clear; bits 18-0 must all be zero.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5186
5187
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Advances *IN past the constant on success.
   Note that only a positive zero is accepted: the sign check below
   rejects "-0.0".  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: any nonzero hex value fails here.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to be how atof_generic's result
     indicates a zero significand — confirm against atof_generic.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5225
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.

   On success stores the full 32-bit single-precision bit pattern in
   *IMMED, advances *CCP past the constant and returns SUCCESS;
   otherwise returns FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan forward to the next separator looking for a float marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns, plus +0.0 and -0.0.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5289
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a textual shift mnemonic (e.g. "lsl", "asr") onto its kind;
   entries live in the arm_shift_hsh hash table.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5312
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   I indexes inst.operands; MODE restricts which shift kinds are
   accepted.  On success the kind goes in inst.operands[i].shift_kind,
   a register amount in .imm/.immisreg, and an immediate amount in
   inst.relocs[0].exp.  Returns SUCCESS or FAIL (setting inst.error).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Collect the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restriction requested by MODE.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; everything else is followed by a register
     (only when MODE is NO_SHIFT_RESTRICT) or an immediate.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5416
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> optionally followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30] and the base
	 constant must fit in 8 bits.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #<immediate>: leave validation/encoding to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5487
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  /* BFD_RELOC_* codes per instruction class; 0 marks a combination
     with no relocation available (see group_reloc_table below).  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

/* Selects which non-ALU code field of group_reloc_table_entry applies
   when parsing an address operand.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5513
/* A zero code below means the group relocation is not available for
   that instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5588
5589 /* Given the address of a pointer pointing to the textual name of a group
5590 relocation as may appear in assembler source, attempt to find its details
5591 in group_reloc_table. The pointer will be updated to the character after
5592 the trailing colon. On failure, FAIL will be returned; SUCCESS
5593 otherwise. On success, *entry will be updated to point at the relevant
5594 group_reloc_table entry. */
5595
5596 static int
5597 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5598 {
5599 unsigned int i;
5600 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5601 {
5602 int length = strlen (group_reloc_table[i].name);
5603
5604 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5605 && (*str)[length] == ':')
5606 {
5607 *out = &group_reloc_table[i];
5608 *str += (length + 1);
5609 return SUCCESS;
5610 }
5611 }
5612
5613 return FAIL;
5614 }
5615
5616 /* Parse a <shifter_operand> for an ARM data processing instruction
5617 (as for parse_shifter_operand) where group relocations are allowed:
5618
5619 #<immediate>
5620 #<immediate>, <rotate>
5621 #:<group_reloc>:<expression>
5622 <Rm>
5623 <Rm>, <shift>
5624
5625 where <group_reloc> is one of the strings defined in group_reloc_table.
5626 The hashes are optional.
5627
5628 Everything else is as for parse_shifter_operand. */
5629
5630 static parse_operand_result
5631 parse_shifter_operand_group_reloc (char **str, int i)
5632 {
5633 /* Determine if we have the sequence of characters #: or just :
5634 coming next. If we do, then we check for a group relocation.
5635 If we don't, punt the whole lot to parse_shifter_operand. */
5636
5637 if (((*str)[0] == '#' && (*str)[1] == ':')
5638 || (*str)[0] == ':')
5639 {
5640 struct group_reloc_table_entry *entry;
5641
5642 if ((*str)[0] == '#')
5643 (*str) += 2;
5644 else
5645 (*str)++;
5646
5647 /* Try to parse a group relocation. Anything else is an error. */
5648 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5649 {
5650 inst.error = _("unknown group relocation");
5651 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5652 }
5653
5654 /* We now have the group relocation table entry corresponding to
5655 the name in the assembler source. Next, we parse the expression. */
5656 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5657 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5658
5659 /* Record the relocation type (always the ALU variant here). */
5660 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5661 gas_assert (inst.relocs[0].type != 0);
5662
5663 return PARSE_OPERAND_SUCCESS;
5664 }
5665 else
5666 return parse_shifter_operand (str, i) == SUCCESS
5667 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5668
5669 /* Never reached. */
5670 }
5671
5672 /* Parse a Neon alignment expression. Information is written to
5673 inst.operands[i]. We assume the initial ':' has been skipped.
5674
5675 align .imm = align << 8, .immisalign=1, .preind=0 */
5676 static parse_operand_result
5677 parse_neon_alignment (char **str, int i)
5678 {
5679 char *p = *str;
5680 expressionS exp;
5681
5682 my_get_expression (&exp, &p, GE_NO_PREFIX);
5683
5684 if (exp.X_op != O_constant)
5685 {
5686 inst.error = _("alignment must be constant");
5687 return PARSE_OPERAND_FAIL;
5688 }
5689
5690 inst.operands[i].imm = exp.X_add_number << 8;
5691 inst.operands[i].immisalign = 1;
5692 /* Alignments are not pre-indexes. */
5693 inst.operands[i].preind = 0;
5694
5695 *str = p;
5696 return PARSE_OPERAND_SUCCESS;
5697 }
5698
5699 /* Parse all forms of an ARM address expression. Information is written
5700 to inst.operands[i] and/or inst.relocs[0].
5701
5702 Preindexed addressing (.preind=1):
5703
5704 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5705 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5706 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5707 .shift_kind=shift .relocs[0].exp=shift_imm
5708
5709 These three may have a trailing ! which causes .writeback to be set also.
5710
5711 Postindexed addressing (.postind=1, .writeback=1):
5712
5713 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5714 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5715 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5716 .shift_kind=shift .relocs[0].exp=shift_imm
5717
5718 Unindexed addressing (.preind=0, .postind=0):
5719
5720 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5721
5722 Other:
5723
5724 [Rn]{!} shorthand for [Rn,#0]{!}
5725 =immediate .isreg=0 .relocs[0].exp=immediate
5726 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5727
5728 It is the caller's responsibility to check for addressing modes not
5729 supported by the instruction, and to set inst.relocs[0].type. */
5730
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means this is either "=immediate" or a bare label/expression,
     which is treated as a PC-relative reference.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* For MVE the base may itself be an MVE vector (Q) register.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      /* NOTE(review): group_type cannot be GROUP_MVE on this branch (it
	 was handled above), so the first arm of this test looks
	 unreachable -- confirm before relying on BAD_ADDR_MODE here.  */
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Pre-indexed form: [Rn, ...].  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      /* MVE allows a Q register as the offset; .immisreg == 2 marks
	 a vector (rather than core-register) offset.  */
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      /* Optional UXTW #imm; the shift amount is folded into
		 bits 5 and up of .imm.  */
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Core-register offset, optionally shifted:
	     [Rn, +/-Rm {, shift}].  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Immediate offset.  A leading '-' was consumed above; back up
	     so the expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type; which table column applies
		 depends on the instruction class.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code means the parsed relocation is not valid for
		 this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* A trailing '!' requests base-register writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed form: [Rn], offset.  Always implies writeback.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  /* MVE vector register as the post-index offset.  */
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      /* Immediate post-index offset.  As in the pre-indexed case,
		 undo the consumed '-' before re-parsing the expression.  */
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6031
6032 static int
6033 parse_address (char **str, int i)
6034 {
6035 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6036 ? SUCCESS : FAIL;
6037 }
6038
/* As parse_address, but group relocations of TYPE are permitted and the
   finer-grained parse_operand_result is propagated to the caller.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
6044
6045 /* Parse an operand for a MOVW or MOVT instruction. */
6046 static int
6047 parse_half (char **str)
6048 {
6049 char * p;
6050
6051 p = *str;
6052 skip_past_char (&p, '#');
6053 if (strncasecmp (p, ":lower16:", 9) == 0)
6054 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6055 else if (strncasecmp (p, ":upper16:", 9) == 0)
6056 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6057
6058 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6059 {
6060 p += 9;
6061 skip_whitespace (p);
6062 }
6063
6064 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6065 return FAIL;
6066
6067 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6068 {
6069 if (inst.relocs[0].exp.X_op != O_constant)
6070 {
6071 inst.error = _("constant expression expected");
6072 return FAIL;
6073 }
6074 if (inst.relocs[0].exp.X_add_number < 0
6075 || inst.relocs[0].exp.X_add_number > 0xffff)
6076 {
6077 inst.error = _("immediate value out of range");
6078 return FAIL;
6079 }
6080 }
6081 *str = p;
6082 return SUCCESS;
6083 }
6084
6085 /* Miscellaneous. */
6086
6087 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6088 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      /* SPSR/CPSR do not exist on M-profile cores.  */
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: scan the whole identifier and look it up in the v7-M
	 special-register hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *apsr/*psr spellings only the text up to and including
	 the final 'r'/'R' names the register; the remainder (if any) is
	 the suffix handled at check_suffix.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each of n/z/c/v/q (and g) may appear at most once; a repeat
	     sets the 0x20 (or 0x2 for g) "duplicate" marker, which is
	     diagnosed after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		  case 'n':
		    nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		    break;

		  case 'z':
		    nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		    break;

		  case 'c':
		    nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		    break;

		  case 'v':
		    nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		    break;

		  case 'q':
		    nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		    break;

		  case 'g':
		    g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		    break;

		  default:
		    inst.error = _("unexpected bit specified after APSR");
		    return FAIL;
		}
	    }

	  /* The full nzcvq set maps onto the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  /* The 'g' bit requires the DSP extension.  */
	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial sets: only the complete "nzcvq"
	     (in any order) or no flag bits at all are acceptable.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* [CS]PSR field suffix, e.g. "_cxsf": look it up in the PSR
	     suffix table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6283
6284 static int
6285 parse_sys_vldr_vstr (char **str)
6286 {
6287 unsigned i;
6288 int val = FAIL;
6289 struct {
6290 const char *name;
6291 int regl;
6292 int regh;
6293 } sysregs[] = {
6294 {"FPSCR", 0x1, 0x0},
6295 {"FPSCR_nzcvqc", 0x2, 0x0},
6296 {"VPR", 0x4, 0x1},
6297 {"P0", 0x5, 0x1},
6298 {"FPCXTNS", 0x6, 0x1},
6299 {"FPCXTS", 0x7, 0x1}
6300 };
6301 char *op_end = strchr (*str, ',');
6302 size_t op_strlen = op_end - *str;
6303
6304 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6305 {
6306 if (!strncmp (*str, sysregs[i].name, op_strlen))
6307 {
6308 val = sysregs[i].regl | (sysregs[i].regh << 3);
6309 *str = op_end;
6310 break;
6311 }
6312 }
6313
6314 return val;
6315 }
6316
6317 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6318 value suitable for splatting into the AIF field of the instruction. */
6319
6320 static int
6321 parse_cps_flags (char **str)
6322 {
6323 int val = 0;
6324 int saw_a_flag = 0;
6325 char *s = *str;
6326
6327 for (;;)
6328 switch (*s++)
6329 {
6330 case '\0': case ',':
6331 goto done;
6332
6333 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6334 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6335 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6336
6337 default:
6338 inst.error = _("unrecognized CPS flag");
6339 return FAIL;
6340 }
6341
6342 done:
6343 if (saw_a_flag == 0)
6344 {
6345 inst.error = _("missing CPS flags");
6346 return FAIL;
6347 }
6348
6349 *str = s - 1;
6350 return val;
6351 }
6352
6353 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6354 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6355
6356 static int
6357 parse_endian_specifier (char **str)
6358 {
6359 int little_endian;
6360 char *s = *str;
6361
6362 if (strncasecmp (s, "BE", 2))
6363 little_endian = 0;
6364 else if (strncasecmp (s, "LE", 2))
6365 little_endian = 1;
6366 else
6367 {
6368 inst.error = _("valid endian specifiers are be or le");
6369 return FAIL;
6370 }
6371
6372 if (ISALNUM (s[2]) || s[2] == '_')
6373 {
6374 inst.error = _("valid endian specifiers are be or le");
6375 return FAIL;
6376 }
6377
6378 *str = s + 2;
6379 return little_endian;
6380 }
6381
6382 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6383 value suitable for poking into the rotate field of an sxt or sxta
6384 instruction, or FAIL on error. */
6385
6386 static int
6387 parse_ror (char **str)
6388 {
6389 int rot;
6390 char *s = *str;
6391
6392 if (strncasecmp (s, "ROR", 3) == 0)
6393 s += 3;
6394 else
6395 {
6396 inst.error = _("missing rotation field after comma");
6397 return FAIL;
6398 }
6399
6400 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6401 return FAIL;
6402
6403 switch (rot)
6404 {
6405 case 0: *str = s; return 0x0;
6406 case 8: *str = s; return 0x1;
6407 case 16: *str = s; return 0x2;
6408 case 24: *str = s; return 0x3;
6409
6410 default:
6411 inst.error = _("rotation can only be 0, 8, 16, or 24");
6412 return FAIL;
6413 }
6414 }
6415
6416 /* Parse a conditional code (from conds[] below). The value returned is in the
6417 range 0 .. 14, or FAIL. */
6418 static int
6419 parse_cond (char **str)
6420 {
6421 char *q;
6422 const struct asm_cond *c;
6423 int n;
6424 /* Condition codes are always 2 characters, so matching up to
6425 3 characters is sufficient. */
6426 char cond[3];
6427
6428 q = *str;
6429 n = 0;
6430 while (ISALPHA (*q) && n < 3)
6431 {
6432 cond[n] = TOLOWER (*q);
6433 q++;
6434 n++;
6435 }
6436
6437 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6438 if (!c)
6439 {
6440 inst.error = _("condition required");
6441 return FAIL;
6442 }
6443
6444 *str = q;
6445 return c->value;
6446 }
6447
6448 /* Parse an option for a barrier instruction. Returns the encoding for the
6449 option, or FAIL. */
6450 static int
6451 parse_barrier (char **str)
6452 {
6453 char *p, *q;
6454 const struct asm_barrier_opt *o;
6455
6456 p = q = *str;
6457 while (ISALPHA (*q))
6458 q++;
6459
6460 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6461 q - p);
6462 if (!o)
6463 return FAIL;
6464
6465 if (!mark_feature_used (&o->arch))
6466 return FAIL;
6467
6468 *str = q;
6469 return o->value;
6470 }
6471
6472 /* Parse the operands of a table branch instruction. Similar to a memory
6473 operand. */
6474 static int
6475 parse_tb (char **str)
6476 {
6477 char * p = *str;
6478 int reg;
6479
6480 if (skip_past_char (&p, '[') == FAIL)
6481 {
6482 inst.error = _("'[' expected");
6483 return FAIL;
6484 }
6485
6486 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6487 {
6488 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6489 return FAIL;
6490 }
6491 inst.operands[0].reg = reg;
6492
6493 if (skip_past_comma (&p) == FAIL)
6494 {
6495 inst.error = _("',' expected");
6496 return FAIL;
6497 }
6498
6499 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6500 {
6501 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6502 return FAIL;
6503 }
6504 inst.operands[0].imm = reg;
6505
6506 if (skip_past_comma (&p) == SUCCESS)
6507 {
6508 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6509 return FAIL;
6510 if (inst.relocs[0].exp.X_add_number != 1)
6511 {
6512 inst.error = _("invalid shift");
6513 return FAIL;
6514 }
6515 inst.operands[0].shifted = 1;
6516 }
6517
6518 if (skip_past_char (&p, ']') == FAIL)
6519 {
6520 inst.error = _("']' expected");
6521 return FAIL;
6522 }
6523 *str = p;
6524 return SUCCESS;
6525 }
6526
6527 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6528 information on the types the operands can take and how they are encoded.
6529 Up to four operands may be read; this function handles setting the
6530 ".present" field for each read operand itself.
6531 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6532 else returns FAIL. */
6533
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 vector register; what follows decides the case.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a D register destination takes a second core
		 register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: two core registers into a pair of S registers.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6749
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd): the ARM constraint occupies the low
   16 bits of the combined value and the Thumb constraint the high 16.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6754
6755 /* Matcher codes for parse_operands. */
6756 enum operand_parse_code
6757 {
6758 OP_stop, /* end of line */
6759
6760 OP_RR, /* ARM register */
6761 OP_RRnpc, /* ARM register, not r15 */
6762 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6763 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6764 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6765 optional trailing ! */
6766 OP_RRw, /* ARM register, not r15, optional trailing ! */
6767 OP_RCP, /* Coprocessor number */
6768 OP_RCN, /* Coprocessor register */
6769 OP_RF, /* FPA register */
6770 OP_RVS, /* VFP single precision register */
6771 OP_RVD, /* VFP double precision register (0..15) */
6772 OP_RND, /* Neon double precision register (0..31) */
6773 OP_RNDMQ, /* Neon double precision (0..31) or MVE vector register. */
6774 OP_RNDMQR, /* Neon double precision (0..31), MVE vector or ARM register.
6775 */
6776 OP_RNQ, /* Neon quad precision register */
6777 OP_RNQMQ, /* Neon quad or MVE vector register. */
6778 OP_RVSD, /* VFP single or double precision register */
6779 OP_RNSD, /* Neon single or double precision register */
6780 OP_RNDQ, /* Neon double or quad precision register */
6781 OP_RNDQMQ, /* Neon double, quad or MVE vector register. */
6782 OP_RNSDQ, /* Neon single, double or quad precision register */
6783 OP_RNSC, /* Neon scalar D[X] */
6784 OP_RVC, /* VFP control register */
6785 OP_RMF, /* Maverick F register */
6786 OP_RMD, /* Maverick D register */
6787 OP_RMFX, /* Maverick FX register */
6788 OP_RMDX, /* Maverick DX register */
6789 OP_RMAX, /* Maverick AX register */
6790 OP_RMDS, /* Maverick DSPSC register */
6791 OP_RIWR, /* iWMMXt wR register */
6792 OP_RIWC, /* iWMMXt wC register */
6793 OP_RIWG, /* iWMMXt wCG register */
6794 OP_RXA, /* XScale accumulator register */
6795
6796 OP_RNSDQMQ, /* Neon single, double or quad register or MVE vector register
6797 */
6798 OP_RNSDQMQR, /* Neon single, double or quad register, MVE vector register or
6799 GPR (no SP/SP) */
6800 OP_RMQ, /* MVE vector register. */
6801
6802 /* New operands for Armv8.1-M Mainline. */
6803 OP_LR, /* ARM LR register */
6804 OP_RRe, /* ARM register, only even numbered. */
6805 OP_RRo, /* ARM register, only odd numbered, not r13 or r15. */
6806 OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
6807
6808 OP_REGLST, /* ARM register list */
6809 OP_CLRMLST, /* CLRM register list */
6810 OP_VRSLST, /* VFP single-precision register list */
6811 OP_VRDLST, /* VFP double-precision register list */
6812 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6813 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6814 OP_NSTRLST, /* Neon element/structure list */
6815 OP_VRSDVLST, /* VFP single or double-precision register list and VPR */
6816 OP_MSTRLST2, /* MVE vector list with two elements. */
6817 OP_MSTRLST4, /* MVE vector list with four elements. */
6818
6819 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6820 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6821 OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
6822 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6823 OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */
6824 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6825 OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
6826 */
6827 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6828 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6829 OP_VMOV, /* Neon VMOV operands. */
6830 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6831 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6832 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6833 OP_VLDR, /* VLDR operand. */
6834
6835 OP_I0, /* immediate zero */
6836 OP_I7, /* immediate value 0 .. 7 */
6837 OP_I15, /* 0 .. 15 */
6838 OP_I16, /* 1 .. 16 */
6839 OP_I16z, /* 0 .. 16 */
6840 OP_I31, /* 0 .. 31 */
6841 OP_I31w, /* 0 .. 31, optional trailing ! */
6842 OP_I32, /* 1 .. 32 */
6843 OP_I32z, /* 0 .. 32 */
6844 OP_I63, /* 0 .. 63 */
6845 OP_I63s, /* -64 .. 63 */
6846 OP_I64, /* 1 .. 64 */
6847 OP_I64z, /* 0 .. 64 */
6848 OP_I255, /* 0 .. 255 */
6849
6850 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6851 OP_I7b, /* 0 .. 7 */
6852 OP_I15b, /* 0 .. 15 */
6853 OP_I31b, /* 0 .. 31 */
6854
6855 OP_SH, /* shifter operand */
6856 OP_SHG, /* shifter operand with possible group relocation */
6857 OP_ADDR, /* Memory address expression (any mode) */
6858 OP_ADDRMVE, /* Memory address expression for MVE's VSTR/VLDR. */
6859 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6860 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6861 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6862 OP_EXP, /* arbitrary expression */
6863 OP_EXPi, /* same, with optional immediate prefix */
6864 OP_EXPr, /* same, with optional relocation suffix */
6865 OP_EXPs, /* same, with optional non-first operand relocation suffix */
6866 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6867 OP_IROT1, /* VCADD rotate immediate: 90, 270. */
6868 OP_IROT2, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6869
6870 OP_CPSF, /* CPS flags */
6871 OP_ENDI, /* Endianness specifier */
6872 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6873 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6874 OP_COND, /* conditional code */
6875 OP_TB, /* Table branch. */
6876
6877 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6878
6879 OP_RRnpc_I0, /* ARM register or literal 0 */
6880 OP_RR_EXr, /* ARM register or expression with opt. reloc stuff. */
6881 OP_RR_EXi, /* ARM register or expression with imm prefix */
6882 OP_RF_IF, /* FPA register or immediate */
6883 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6884 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6885
6886 /* Optional operands. */
6887 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6888 OP_oI31b, /* 0 .. 31 */
6889 OP_oI32b, /* 1 .. 32 */
6890 OP_oI32z, /* 0 .. 32 */
6891 OP_oIffffb, /* 0 .. 65535 */
6892 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6893
6894 OP_oRR, /* ARM register */
6895 OP_oLR, /* ARM LR register */
6896 OP_oRRnpc, /* ARM register, not the PC */
6897 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6898 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6899 OP_oRND, /* Optional Neon double precision register */
6900 OP_oRNQ, /* Optional Neon quad precision register */
6901 OP_oRNDQMQ, /* Optional Neon double, quad or MVE vector register. */
6902 OP_oRNDQ, /* Optional Neon double or quad precision register */
6903 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6904 OP_oRNSDQMQ, /* Optional single, double or quad register or MVE vector
6905 register. */
6906 OP_oSHll, /* LSL immediate */
6907 OP_oSHar, /* ASR immediate */
6908 OP_oSHllar, /* LSL or ASR immediate */
6909 OP_oROR, /* ROR 0/8/16/24 */
6910 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6911
6912 /* Some pre-defined mixed (ARM/THUMB) operands. */
6913 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6914 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6915 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6916
6917 OP_FIRST_OPTIONAL = OP_oI7b
6918 };
6919
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   PATTERN is a sequence of OP_* codes terminated by OP_stop; each code
   may pack a distinct ARM and Thumb code into one element (see
   MIX_ARM_THUMB_OPERANDS), selected here by THUMB.  Optional operands
   (codes >= OP_FIRST_OPTIONAL) are handled by backtracking: if a later
   operand fails, parsing is retried with the optional operand marked
   absent.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

/* Local helper macros.  They all parse from STR (advancing it) into
   inst.operands[i], and jump to bad_args/failure or a caller-supplied
   label on error, so they may only be used inside the parse loop.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

#define po_barrier_or_imm(str)				   \
  do							   \
    {							   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	       && val != 0xf))				   \
	{						   \
	   inst.error = _("invalid barrier type");	   \
	   backtrack_pos = 0;				   \
	   goto failure;				   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Mixed ARM/Thumb codes pack the Thumb code in the high half.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RRe:
	case OP_RRo:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RNDMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
	  break;
	try_rndmq:
	case OP_RNDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
	  break;
	try_rnd:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
	  break;
	try_nq:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
	case OP_oRNDQMQ:
	case OP_RNDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
	  break;
	try_rndq:
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
	case OP_RNSDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_mq);
	  break;
	  try_mq:
	case OP_oRNSDQMQ:
	case OP_RNSDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
	  break;
	  try_nsdq2:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  inst.error = 0;
	  break;
	case OP_RMQ:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	    {
	      inst.error
	        = _("only floating point zero is allowed as immediate value");
	      goto failure;
	    }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC_MQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
	  break;
	try_rnsdq_rnsc:
	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,     32, TRUE);    break;
	case OP_oI32z:	 po_imm_or_fail (  0,     32, TRUE);    break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_MSTRLST4:
	case OP_MSTRLST2:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   1, &inst.operands[i].vectype);
	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
	    goto failure;
	  break;
	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   0, &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDRMVE:
	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
	  break;

	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.	 */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	case OP_MSTRLST2:
	case OP_MSTRLST4:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	case OP_RRe:
	  if (inst.operands[i].isreg
	      && (inst.operands[i].reg & 0x00000001) != 0)
	    inst.error = BAD_ODD;
	  break;

	case OP_RRo:
	  if (inst.operands[i].isreg)
	    {
	      if ((inst.operands[i].reg & 0x00000001) != 1)
		inst.error = BAD_EVEN;
	      else if (inst.operands[i].reg == REG_SP)
		as_tsktsk (MVE_BAD_SP);
	      else if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	    }
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7739
7740 #undef po_char_or_fail
7741 #undef po_reg_or_fail
7742 #undef po_reg_or_goto
7743 #undef po_imm_or_fail
7744 #undef po_scalar_or_fail
7745 #undef po_barrier_or_imm
7746
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   *enclosing* function — so this may only be used inside functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7758
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Note: REG may be evaluated twice and the macro can return from the
   enclosing (void) function — pass a side-effect-free expression.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
7779
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is emitted only when the user has not
   disabled deprecation warnings (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7787
7788 /* Functions for operand encoding. ARM, then Thumb. */
7789
7790 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7791
7792 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7793
7794 The only binary encoding difference is the Coprocessor number. Coprocessor
7795 9 is used for half-precision calculations or conversions. The format of the
7796 instruction is the same as the equivalent Coprocessor 10 instruction that
7797 exists for Single-Precision operation. */
7798
7799 static void
7800 do_scalar_fp16_v82_encode (void)
7801 {
7802 if (inst.cond < COND_ALWAYS)
7803 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7804 " the behaviour is UNPREDICTABLE"));
7805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7806 _(BAD_FP16));
7807
7808 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7809 mark_feature_used (&arm_ext_fp16);
7810 }
7811
7812 /* If VAL can be encoded in the immediate field of an ARM instruction,
7813 return the encoded form. Otherwise, return FAIL. */
7814
7815 static unsigned int
7816 encode_arm_immediate (unsigned int val)
7817 {
7818 unsigned int a, i;
7819
7820 if (val <= 0xff)
7821 return val;
7822
7823 for (i = 2; i < 32; i += 2)
7824 if ((a = rotate_left (val, i)) <= 0xff)
7825 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7826
7827 return FAIL;
7828 }
7829
7830 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7831 return the encoded form. Otherwise, return FAIL. */
7832 static unsigned int
7833 encode_thumb32_immediate (unsigned int val)
7834 {
7835 unsigned int a, i;
7836
7837 if (val <= 0xff)
7838 return val;
7839
7840 for (i = 1; i <= 24; i++)
7841 {
7842 a = val >> i;
7843 if ((val & ~(0xff << i)) == 0)
7844 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7845 }
7846
7847 a = val & 0xff;
7848 if (val == ((a << 16) | a))
7849 return 0x100 | a;
7850 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7851 return 0x300 | a;
7852
7853 a = val & 0xff00;
7854 if (val == ((a << 16) | a))
7855 return 0x200 | (a >> 8);
7856
7857 return FAIL;
7858 }
/* Encode a VFP SP or DP register number REG into inst.instruction at
   the field position POS.  D registers above 15 are only accepted when
   the selected FPU has the D32 extension; using one records that
   feature as used.

   SP registers split into a 4-bit field plus a low "extension" bit;
   DP registers split into a 4-bit field plus a high extension bit.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* d16-d31 require the VFP D32 register-bank extension.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Place the register bits for the requested field.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7913
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.

   Warns about UNPREDICTABLE uses of r15 in the register-shifted
   register form before emitting the shift-kind and shift-amount
   fields.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* The shift-amount register itself must not be r15 either.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded as ROR with a zero shift amount.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: leave it to the fixup machinery.  */
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
7951
7952 static void
7953 encode_arm_shifter_operand (int i)
7954 {
7955 if (inst.operands[i].isreg)
7956 {
7957 inst.instruction |= inst.operands[i].reg;
7958 encode_arm_shift (i);
7959 }
7960 else
7961 {
7962 inst.instruction |= INST_IMMEDIATE;
7963 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
7964 inst.instruction |= inst.operands[i].imm;
7965 }
7966 }
7967
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register (bits 16-19) and the P/W addressing-mode
   bits common to both modes.  Operand I is the address operand.
   IS_T is TRUE for the unprivileged ldrt/strt forms, which only
   accept post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* In the T forms the W bit selects the unprivileged variant
	 rather than write-back (post-indexing always writes back).  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Rn == Rd with write-back (or post-indexing) is UNPREDICTABLE;
     compare the already-encoded bit fields 16-19 and 12-15.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8010
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8070
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shift field, so a scaled register index is invalid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: Rm in bits 0-3.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Split 8-bit immediate form (bits 8-11 and 0-3).  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8114
/* Write immediate bits [7:0] to the following locations:

  |28/24|23         19|18 16|15                    4|3     0|
  |  a  |x x x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  /* e f g h -> bits 3:0.  */
  inst.instruction |= immbits & 0xf;
  /* b c d -> bits 18:16.  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* a -> bit 28 in the Thumb encoding, bit 24 in the ARM encoding.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
8129
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is ignored.  SIZE must be 8, 16, 32 or 64;
   only SIZE == 64 touches the high word.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;
  int flip_hi = 0;

  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 64:
      flip_hi = 1;
      /* fall through.  */

    case 32:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = ~*xlo & lo_mask;

  if (xhi && flip_hi)
    *xhi = ~*xhi & 0xffffffff;
}
8166
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8178
/* For immediate of above form (each byte all-zeros or all-ones),
   return 0bABCD: bit 0 of each byte, packed into the low nibble.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned nibble = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    nibble |= ((imm >> (byte * 8)) & 1) << byte;

  return nibble;
}
8187
/* Compress quarter-float representation to 0b...000 abcdefgh: the sign
   bit followed by the low exponent bit and top six mantissa bits of a
   single-precision bit pattern.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_bits;
}
8195
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   represented with any cmode at the given (or any smaller) size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xF.  Only valid for 32-bit
     elements and never for the MVN form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* Every byte all-zeros or all-ones: cmode 0xE with op = 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only encodable when both halves
	 are equal; retry it as a 32-bit pattern below.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One non-zero byte (cmode 0/2/4/6), or a byte over trailing
	 ones (cmode 0xC/0xD).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Fall back to 16-bit elements when both halfwords repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One non-zero byte in either halfword position (cmode 8/0xA).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Fall back to 8-bit elements when both bytes repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8305
8306 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE binary64 layout: bits 62-52 exponent, bits 51-0 mantissa.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Exponent must be zero (zero/denormal), all-ones (inf/NaN), or in
     the single-precision range after rebiasing, and the low 29 mantissa
     bits (which binary32 cannot hold) must all be zero.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
    && (mantissa & 0x1FFFFFFFl) == 0;
}
8320
8321 /* Returns a double precision value casted to single precision
8322 (ignoring the least significant bits in exponent and mantissa). */
8323
8324 static int
8325 double_to_single (bfd_int64_t v)
8326 {
8327 int sign = (int) ((v >> 63) & 1l);
8328 int exp = (int) ((v >> 52) & 0x7FF);
8329 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8330
8331 if (exp == 0x7FF)
8332 exp = 0xFF;
8333 else
8334 {
8335 exp = exp - 1023 + 127;
8336 if (exp >= 0xFF)
8337 {
8338 /* Infinity. */
8339 exp = 0x7F;
8340 mantissa = 0;
8341 }
8342 else if (exp < 0)
8343 {
8344 /* No denormalized numbers. */
8345 exp = 0;
8346 mantissa = 0;
8347 }
8348 }
8349 mantissa >>= 29;
8350 return (sign << 31) | (exp << 23) | mantissa;
8351 }
8352 #endif /* BFD_HOST_64_BIT */
8353
/* Kind of "=expr" literal load being processed by move_or_literal_pool:
   a Thumb core-register load, an ARM core-register load, or a vector
   (VFP/Neon) register load.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};
8360
8361 static void do_vfp_nsyn_opcode (const char *);
8362
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* Select the load bit for the encoding in use; only load
     instructions may take the "=expr" pseudo operand.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  /* Reassemble the bignum (or converted flonum) littlenums
	     into a host integer V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise-inverted value for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the modified immediate into the i:imm3:imm8
			 fields of the T32 encoding.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      /* imm4:i:imm3:imm8 fields of the T32 MOVW encoding.  */
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Vector destination: try a VMOV/VMVN immediate.  The high
		 word is taken from the operand when present, otherwise it
		 is the sign extension (or zero) of the low word.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted constant (VMVN form).  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form worked: fall back to a PC-relative literal pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8607
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a "=constant" pseudo operand; only valid when the
	 destination is a vector register.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: option field in the low bits, U bit set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations are preserved; otherwise use the plain
	 co-processor offset reloc for the current instruction set.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8684
8685 /* Functions for instruction encoding, sorted by sub-architecture.
8686 First some generics; their names are taken from the conventional
8687 bit positions for register arguments in ARM format instructions. */
8688
/* Encoder for instructions with no operands: the opcode table value is
   already complete.  */
static void
do_noargs (void)
{
}
8693
/* Encode Rd (operand 0) into bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
8699
/* Encode Rn (operand 0) into bits 16-19.  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
8705
/* Encode Rd (operand 0) into bits 12-15 and Rm (operand 1) into
   bits 0-3.  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
8712
/* Encode Rm (operand 0) into bits 0-3 and Rn (operand 1) into
   bits 16-19.  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
8719
/* Encode Rd (operand 0) into bits 12-15 and Rn (operand 1) into
   bits 16-19.  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
8726
/* Encode operand 0 into the Rn field (bits 16-19) and operand 1 into
   the Rd field (bits 12-15).  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
8733
/* Encode operand 0 into bits 8-11 and operand 1 into bits 16-19
   (TT/TTT/TTA/TTAT test-target instructions).  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
8740
8741 static bfd_boolean
8742 check_obsolete (const arm_feature_set *feature, const char *msg)
8743 {
8744 if (ARM_CPU_IS_ANY (cpu_variant))
8745 {
8746 as_tsktsk ("%s", msg);
8747 return TRUE;
8748 }
8749 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8750 {
8751 as_bad ("%s", msg);
8752 return TRUE;
8753 }
8754
8755 return FALSE;
8756 }
8757
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19), with the
   additional operand and architecture checks required for SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      /* The address register must differ from both transfer registers.  */
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8781
/* Encode Rd (bits 12-15), Rn (bits 16-19) and Rm (bits 0-3).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
8789
/* Encode Rm (bits 0-3), Rd (bits 12-15) and a base register Rn
   (bits 16-19) that must not be PC and must carry no offset.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Only a plain [Rn] address is accepted: any parsed offset
     expression must be absent or exactly zero.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8802
/* Encode an immediate (operand 0) into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8808
/* Encode Rd (bits 12-15) and a co-processor address operand 1, with
   both writeback and unindexed addressing permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8815
8816 /* ARM instructions, in alphabetical order by function name (except
8817 that wrapper functions appear immediately after the function they
8818 wrap). */
8819
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* In ARM state PC reads as the instruction address plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    /* Set the Thumb bit of the target's address for interworking.  */
    inst.relocs[0].exp.X_add_number |= 1;
}
8841
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state PC reads as the instruction address plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    /* Set the Thumb bit of the target's address for interworking.  */
    inst.relocs[0].exp.X_add_number |= 1;
}
8866
/* Encode a three-operand arithmetic instruction (Rd, Rn, shifter
   operand).  The two-operand form "op Rd, <shifter>" implies Rn = Rd.  */
static void
do_arit (void)
{
  /* The Thumb-1 ALU group relocations cannot be used on ARM
     encodings.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8879
8880 static void
8881 do_barrier (void)
8882 {
8883 if (inst.operands[0].present)
8884 inst.instruction |= inst.operands[0].imm;
8885 else
8886 inst.instruction |= 0xf;
8887 }
8888
8889 static void
8890 do_bfc (void)
8891 {
8892 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8893 constraint (msb > 32, _("bit-field extends past end of register"));
8894 /* The instruction encoding stores the LSB and MSB,
8895 not the LSB and width. */
8896 inst.instruction |= inst.operands[0].reg << 12;
8897 inst.instruction |= inst.operands[1].imm << 7;
8898 inst.instruction |= (msb - 1) << 16;
8899 }
8900
/* Encode BFI Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8920
8921 static void
8922 do_bfx (void)
8923 {
8924 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8925 _("bit-field extends past end of register"));
8926 inst.instruction |= inst.operands[0].reg << 12;
8927 inst.instruction |= inst.operands[1].reg;
8928 inst.instruction |= inst.operands[2].imm << 7;
8929 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8930 }
8931
8932 /* ARM V5 breakpoint instruction (argument parse)
8933 BKPT <16 bit unsigned immediate>
8934 Instruction is not conditional.
8935 The bit pattern given in insns[] has the COND_ALWAYS condition,
8936 and it is an error if the caller tried to override that. */
8937
8938 static void
8939 do_bkpt (void)
8940 {
8941 /* Top 12 of 16 bits to bits 19:8. */
8942 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8943
8944 /* Bottom 4 of 16 bits to bits 3:0. */
8945 inst.instruction |= inst.operands[0].imm & 0xf;
8946 }
8947
/* Select the PC-relative relocation for a branch target.
   DEFAULT_RELOC is used unless the operand carries an explicit
   "(plt)" or "(tlscall)" suffix.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8964
/* Encode B{cond}.  EABI v4+ objects use the JUMP reloc so the linker
   can insert interworking stubs.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8975
/* Encode BL{cond}.  For EABI v4+ an unconditional BL uses the CALL
   reloc (may be converted to BLX by the linker); a conditional one
   uses the JUMP reloc.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8991
8992 /* ARM V5 branch-link-exchange instruction (argument parse)
8993 BLX <target_addr> ie BLX(1)
8994 BLX{<condition>} <Rm> ie BLX(2)
8995 Unfortunately, there are two different opcodes for this mnemonic.
8996 So, the insns[].value is not used, and the code here zaps values
8997 into inst.instruction.
8998 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8999
/* Encode BLX: register form keeps the table opcode, immediate form
   replaces it (see the comment preceding this function).  */
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9023
/* Encode BX{cond} Rm, emitting an R_ARM_V4BX marker reloc when
   appropriate so linkers can rewrite BX for pre-v5 cores.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Non-EABI (or pre-v4 EABI) objects never get the marker reloc;
     note the #ifdef guards only the condition, not the statement.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9048
9049
9050 /* ARM v5TEJ. Jump to Jazelle code. */
9051
/* ARM v5TEJ.  Jump to Jazelle code: encode BXJ{cond} Rm.  */
static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
9060
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	*/
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* coproc number.  */
  inst.instruction |= inst.operands[1].imm << 20;  /* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;  /* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;	   /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2.  */
}
9074
/* Encode CMP{cond} <Rn>, <shifter_operand>: Rn goes in bits 16-19 and
   the flexible second operand is handled by the shared helper.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
9081
9082 /* Transfer between coprocessor and ARM registers.
9083 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9084 MRC2
9085 MCR{cond}
9086 MCR2
9087
9088 No special properties. */
9089
/* Description of a coprocessor register access whose use is deprecated
   and/or obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Features for which access is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
9102
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Each entry gives the
   register's (cp, opc1, CRn, CRm, opc2) encoding and the feature set
   from which access to it is deprecated; none of these is obsoleted
   (the obsoleted field is ARM_ARCH_NONE throughout).  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8
9127
/* Number of entries in the deprecated_coproc_regs table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9130
/* Encode MRC/MRC2/MCR/MCR2.  Validates Rd per instruction form and
   mode, warns about deprecated coprocessor register accesses, then
   inserts the coproc number, opcode fields and registers.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn (not error) when the access matches a deprecated coprocessor
     register, unless assembling for "any" CPU or warnings are off.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 21;	/* opcode_1.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
9180
9181 /* Transfer between coprocessor register and pair of ARM registers.
9182 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9183 MCRR2
9184 MRRC{cond}
9185 MRRC2
9186
9187 Two XScale instructions are special cases of these:
9188
9189 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9190 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9191
9192 Result unpredictable if Rd or Rn is R15. */
9193
/* Encode MCRR/MCRR2/MRRC/MRRC2 (coprocessor <-> ARM register pair).
   See the comment above for operand forms; result is unpredictable if
   Rd or Rn is R15, or for MRRC if Rd == Rn.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
}
9227
/* Encode CPS<effect> <iflags>{, #<mode>}: the interrupt flags go in
   bits 6-8; an explicit mode operand also sets the M-mod bit.  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
9238
/* Encode DBG #<option>: the option value occupies the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9244
/* Encode SDIV/UDIV Rd, {Rn,} Rm.  When Rn is omitted it defaults to
   Rd.  None of the registers may be the PC.  */
static void
do_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  constraint ((Rd == REG_PC), BAD_PC);
  constraint ((Rn == REG_PC), BAD_PC);
  constraint ((Rm == REG_PC), BAD_PC);

  inst.instruction |= Rd << 16;
  inst.instruction |= Rn << 0;
  inst.instruction |= Rm << 8;
}
9263
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing: validation only.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Record the IT mask (low 4 bits plus the leading 1) and the
	 base condition for checking the following instructions.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9280
9281 /* If there is only one register in the register list,
9282 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number (0-15).  Otherwise return -1.

   RANGE is a bitmask with bit N set for register N.  An empty mask, a
   mask with more than one bit set, and a mask whose single bit is above
   bit 15 all yield -1.  The previous implementation computed
   `ffs (range) - 1' and then evaluated `1 << -1' (undefined behavior)
   when RANGE was 0; it also relied on the non-standard ffs().  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* -1 for an empty list, a negative mask, or more than one bit set
     (x & (x - 1) clears the lowest set bit, so the result is non-zero
     exactly when two or more bits are set).  */
  if (range <= 0 || (range & (range - 1)) != 0)
    return -1;

  /* Exactly one bit is set; find its position.  */
  for (i = 0; (range & 1) == 0; range >>= 1)
    i++;

  return i > 15 ? -1 : i;
}
9289
/* Common encoding for LDM/STM and PUSH/POP.  Inserts the base register
   and register list, sets writeback/user-bank bits, diagnoses the
   UNPREDICTABLE writeback combinations, and for single-register
   PUSH/POP switches to the A2 (single-transfer) encoding.  */
static void
encode_ldmstm (int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^'-suffixed register list selects the LDM(2)/LDM(3)/STM(2)
     forms (user-bank transfer / exception return).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as STR/LDR
	 with Rt in bits 12-15.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9345
/* Encode a plain LDM/STM mnemonic (no PUSH/POP A2 rewriting).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9351
9352 /* ARMv5TE load-consecutive (argument parse)
9353 Mode is like LDRH.
9354
9355 LDRccD R, mode
9356 STRccD R, mode. */
9357
/* Encode LDRD/STRD.  The first transfer register must be even and not
   r14; an explicit second register must be the next consecutive one
   (it defaults to first+1 when omitted).  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9393
/* Encode LDREX Rt, [Rn].  Only a plain pre-indexed base with a zero
   offset is legal in the ARM encoding.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Redundant: a PC base was already rejected (with BAD_ADDR_MODE) by
     the constraint above, so this check can never fire.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The parsed zero offset needs no relocation.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9425
/* Encode LDREXD Rt, {Rt2,} [Rn]: Rt must be even and not r14, and an
   explicit Rt2 must be Rt+1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;	/* Base register.  */
}
9441
9442 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9443 which is not a multiple of four is UNPREDICTABLE. */
/* In both ARM and thumb state 'ldr pc, #imm' with an immediate
   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  /* Only flag literal (PC-relative, non register-offset) loads into
     the PC whose constant offset is not 4-byte aligned.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9453
/* Encode LDR/STR (word/byte).  A non-register second operand is a
   literal, which may be turned into a MOV or a literal-pool load.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9464
/* Encode LDRT/STRT (word/byte, user-mode translation).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9483
9484 /* Halfword and signed-byte load/store operations. */
9485
/* Encode the v4 halfword/signed-byte loads and stores (addressing
   mode 3).  Rt must not be the PC; literals may go via the pool.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9496
/* Encode LDRHT/LDRSBT/LDRSHT/STRHT (mode-3 unprivileged transfers).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9515
9516 /* Co-processor register load/store.
9517 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* Co-processor number.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd.  */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9525
/* Encode MLA/MLS Rd, Rm, Rs, Rn.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */
  inst.instruction |= inst.operands[3].reg << 12;	/* Rn (accumulator).  */
}
9540
/* Encode MOV Rd, <shifter_operand>.  The Thumb-1 ALU group relocations
   are not valid on the ARM encoding.  */
static void
do_mov (void)
{
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9550
9551 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* A resolved constant is encoded here; otherwise the reloc set by
     the parser fills the immediate in later.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9572
/* Handle the VFP (non-unified syntax) forms of MRS.  Returns SUCCESS
   when the instruction was rewritten as fmstat/fmrx, FAIL when the
   operands are not VFP and the caller should encode a core MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands, so clear them before re-dispatch.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9591
/* Handle the VFP (non-unified syntax) form of MSR.  Returns SUCCESS
   when rewritten as fmxr, FAIL when the caller should encode a core
   MSR instead.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
9602
/* Encode VMRS Rt, <spec_reg>.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9631
/* Encode VMSR <spec_reg>, Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9655
/* Encode MRS Rd, <psr|banked_reg>.  Tries the VFP rewriting first;
   otherwise inserts Rd and the PSR/banked-register selector.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* Accept only encodings with bit 9 set (banked register) or the
	 0xf0000 field fully set.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9684
9685 /* Two possible forms:
9686 "{C|S}PSR_<field>, Rm",
9687 "{C|S}PSR_f, #expression". */
9688
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */
static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The parsed PSR field mask goes straight into the encoding.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to the ARM-immediate reloc.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9705
/* Encode MUL Rd, Rm{, Rs}.  Rs defaults to Rd when omitted.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */

  /* Rd == Rm is only UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9721
9722 /* Long Multiply Parser
9723 UMULL RdLo, RdHi, Rm, Rs
9724 SMULL RdLo, RdHi, Rm, Rs
9725 UMLAL RdLo, RdHi, Rm, Rs
9726 SMLAL RdLo, RdHi, Rm, Rs. */
9727
/* Encode the long multiplies (UMULL/SMULL/UMLAL/SMLAL); see the
   comment above for the operand order.  */
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9746
/* Encode NOP{, #<hint>}.  With an operand, or on v6K and later, a real
   architectural hint encoding is emitted; otherwise the opcode from
   insns[] (a traditional NOP substitute) is left untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep the condition field only.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9760
9761 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9762 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9763 Condition defaults to COND_ALWAYS.
9764 Error if Rd, Rn or Rm are R15. */
9765
9766 static void
9767 do_pkhbt (void)
9768 {
9769 inst.instruction |= inst.operands[0].reg << 12;
9770 inst.instruction |= inst.operands[1].reg << 16;
9771 inst.instruction |= inst.operands[2].reg;
9772 if (inst.operands[3].present)
9773 encode_arm_shift (3);
9774 }
9775
9776 /* ARM V6 PKHTB (Argument Parse). */
9777
/* ARM V6 PKHTB (Argument Parse).  */
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      /* Note the swapped Rm/Rn positions relative to the shifted form.  */
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9798
9799 /* ARMv5TE: Preload-Cache
9800 MP Extensions: Preload for write
9801
9802 PLD(W) <addr_mode>
9803
9804 Syntactically, like LDR with B=1, W=0, L=1. */
9805
/* Encode PLD/PLDW <addr_mode>; see the comment above.  Only plain
   pre-indexed addressing without writeback is accepted.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9819
9820 /* ARMv7: PLI <addr_mode> */
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* Unlike PLD, PLI clears the P bit after encoding the address.  */
  inst.instruction &= ~PRE_INDEX;
}
9835
/* Encode PUSH/POP {reglist} by rewriting it as the equivalent
   STMDB/LDMIA with SP! as the base and re-dispatching.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as the
     base register in operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9848
9849 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9850 word at the specified address and the following word
9851 respectively.
9852 Unconditionally executed.
9853 Error if Rn is R15. */
9854
/* Encode RFE{!} <Rn>; see the comment above for semantics.  */
static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9862
9863 /* ARM V6 ssat (argument parse). */
9864
/* ARM V6 ssat (argument parse).  */
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The signed saturation bit-position is encoded as value minus one.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9875
9876 /* ARM V6 usat (argument parse). */
9877
/* ARM V6 usat (argument parse).  */
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike ssat, the unsigned saturation limit is encoded directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9888
9889 /* ARM V6 ssat16 (argument parse). */
9890
/* ARM V6 ssat16 (argument parse).  */
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As for ssat, the saturate position is encoded minus one.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9898
9899 static void
9900 do_usat16 (void)
9901 {
9902 inst.instruction |= inst.operands[0].reg << 12;
9903 inst.instruction |= inst.operands[1].imm << 16;
9904 inst.instruction |= inst.operands[2].reg;
9905 }
9906
9907 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9908 preserving the other bits.
9909
9910 setend <endian_specifier>, where <endian_specifier> is either
9911 BE or LE. */
9912
/* Encode SETEND <BE|LE>; see the comment above.  Bit 9 selects
   big-endian.  */
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9923
/* Encode the shift pseudo-ops (e.g. LSL Rd, {Rm,} Rs|#imm); Rm
   defaults to Rd when omitted.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift count: resolved by the shift-immediate reloc.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9944
/* Encode SMC #<imm>: the immediate is filled in by the SMC reloc.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9951
/* Encode HVC #<imm>: the immediate is filled in by the HVC reloc.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9958
/* Encode SWI/SVC #<imm>: the immediate is filled in by the SWI reloc.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9965
/* Encode the ARM-mode SETPAN #<imm>: the 1-bit value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9974
/* Encode the Thumb-mode SETPAN #<imm>: the value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9983
9984 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9985 SMLAxy{cond} Rd,Rm,Rs,Rn
9986 SMLAWy{cond} Rd,Rm,Rs,Rn
9987 Error if any register is R15. */
9988
9989 static void
9990 do_smla (void)
9991 {
9992 inst.instruction |= inst.operands[0].reg << 16;
9993 inst.instruction |= inst.operands[1].reg;
9994 inst.instruction |= inst.operands[2].reg << 8;
9995 inst.instruction |= inst.operands[3].reg << 12;
9996 }
9997
9998 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9999 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10000 Error if any register is R15.
10001 Warning if Rdlo == Rdhi. */
10002
/* Encode SMLALxy Rdlo,Rdhi,Rm,Rs; see the comment above.  */
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10014
10015 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10016 SMULxy{cond} Rd,Rm,Rs
10017 Error if any register is R15. */
10018
10019 static void
10020 do_smul (void)
10021 {
10022 inst.instruction |= inst.operands[0].reg << 16;
10023 inst.instruction |= inst.operands[1].reg;
10024 inst.instruction |= inst.operands[2].reg << 8;
10025 }
10026
10027 /* ARM V6 srs (argument parse). The variable fields in the encoding are
10028 the same for both ARM and Thumb-2. */
10029
/* Encode SRS (store return state); see the comment above.  The base
   register, if given, must be SP and defaults to SP.  */
static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10048
10049 /* ARM V6 strex (argument parse). */
10050
/* ARM V6 strex (argument parse).  Rd, Rm, [Rn] with a mandatory zero
   offset; Rd must not overlap Rm or Rn.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rm (value).  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn (base).  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10074
/* Thumb STREXB/STREXH: validate the addressing mode and overlap, then
   share the Rm/Rd/Rn field encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10089
/* Encode STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be even and not r14, an
   explicit Rt2 must be Rt+1, and Rd must not overlap the pair or the
   base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn (base).  */
}
10111
10112 /* ARM V8 STRL. */
/* ARM V8 STLEX (ARM encoding): reject status-register overlap, then
   share the Rd/Rm/Rn field encoder.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10121
/* ARM V8 STLEX (Thumb encoding): as do_stlex but with the Thumb
   Rm/Rd/Rn field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10130
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].imm << 10;	/* Rotation field.  */
}
10147
/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].imm << 10;	/* Rotation field.  */
}
10161 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10171
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10179
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10185
/* Conversion with double-precision destination, SP source: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10192
/* Conversion with single-precision destination, DP source: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10199
/* Move ARM core register from VFP SP register: Rd at bits 12-15, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
10206
/* Move two core registers from a consecutive pair of SP registers
   (operand 2's imm is the register count, which must be exactly 2).  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10216
/* Move a core register into a VFP SP register: Sn, Rd at bits 12-15.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10223
/* Move two core registers into a consecutive pair of SP registers
   (operand 0's imm is the register count, which must be exactly 2).  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10233
/* Single-precision load/store: Sd plus a coprocessor address
   (operand 1) with writeback and unindexed forms rejected.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10240
/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10247
10248
/* Common encoder for SP load/store-multiple.  Only the IA form may
   omit base-register writeback; DB requires it.  Operand 0 is the base
   register, operand 1 the first VFP register and transfer count.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* Register count.  */
}
10261
/* Common encoder for DP load/store-multiple.  The transfer count is in
   words, i.e. twice the register count, and the FLDMX/FSTMX variants
   add one extra word for the format word.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;	/* Words = 2 * registers.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;				/* Extra format word for X forms.  */

  inst.instruction |= count;
}
10282
/* FLDMIAS/FSTMIAS: SP multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10288
/* FLDMDBS/FSTMDBS: SP multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10294
/* FLDMIAD/FSTMIAD: DP multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10300
/* FLDMDBD/FSTMDBD: DP multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10306
/* FLDMIAX/FSTMIAX: extended-precision multiple, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10312
/* FLDMDBX/FSTMDBX: extended-precision multiple, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10318
/* Double-precision two-register form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10325
/* Double-precision two-register form: Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10332
/* Double-precision two-register form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10339
/* Double-precision three-register form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10347
/* Double-precision single-register form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10353
/* Double-precision three-register form: Dm, Dd, Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10361
/* VFPv3 instructions.  */
/* VMOV.F32 immediate: the 8-bit encoded constant is split with its
   high nibble at bits 16-19 and low nibble at bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10370
/* VMOV.F64 immediate: same split-nibble constant layout as the SP
   form, with a Dd destination.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10378
/* Encode the fraction-bits field of a VCVT fixed-point conversion.
   SRCSIZE is the source width (16 or 32); operand 1's imm holds the
   requested number of fraction bits.  The field stored is
   srcsize - fbits, split with its LSB at bit 5 and the remaining bits
   at bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10402
/* Fixed-point VCVT, SP register, 16-bit source width.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10409
/* Fixed-point VCVT, DP register, 16-bit source width.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10416
/* Fixed-point VCVT, SP register, 32-bit source width.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10423
/* Fixed-point VCVT, DP register, 32-bit source width.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10430 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: Fn at bits 16-19, Fm at bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10439
/* FPA LFM/SFM: load/store multiple FPA registers.  Operand 0 is the
   first register, operand 1's imm the register count (1-4, encoded via
   the CP_T_X/CP_T_Y bits), operand 2 the memory address.  The "ea"/"fd"
   stack-style mnemonics do not index natively, so they are emulated by
   synthesizing an immediate offset of 12 bytes per register.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10478 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC/TEXTRC: the only permitted destination is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10486
/* TEXTRC: Rd at bits 12-15, lane immediate at bits 0-3.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
10493
/* TEXTRM: Rd at bits 12-15, wRn at bits 16-19, lane immediate low.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
10501
/* TINSR: wRd at bits 16-19, Rn at bits 12-15, lane immediate low.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
10509
/* TMIA: accumulator at bits 5-8, Rm at bits 0-3, Rs at bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10517
/* WALIGNI: wRd, wRn, wRm plus a 3-bit alignment immediate at
   bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
10526
/* WMERGE: wRd, wRn, wRm plus a 3-bit immediate at bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
10535
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN: the source register is
     encoded in both the Rn and Rm fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10544
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword load/store.  Picks the ARM or
   Thumb scaled coprocessor-offset relocation depending on the current
   assembly mode.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
10556
/* WLDRW/WSTRW: word load/store of either a wR register or a control
   register.  The control-register form is unconditional (0xf cond).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10570
/* WLDRD/WSTRD: doubleword load/store.  iWMMXt2 additionally supports a
   register-offset addressing form, re-encoded here by hand; otherwise
   the standard coprocessor address encoding is used.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Register-offset form: clear the immediate-form fields and
	 force the unconditional (0xf) encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10593
/* WSHUFH: the 8-bit shuffle immediate is split, high nibble at
   bits 20-23 and low nibble at bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
10602
10603 static void
10604 do_iwmmxt_wzero (void)
10605 {
10606 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10607 inst.instruction |= inst.operands[0].reg;
10608 inst.instruction |= inst.operands[0].reg << 12;
10609 inst.instruction |= inst.operands[0].reg << 16;
10610 }
10611
/* iWMMXt shift/rotate: either the three-register form, or (iWMMXt2
   only) a 5-bit immediate form.  An immediate of 0 is rewritten into a
   canonical equivalent: w...h #0 -> wrorh #16, w...w #0 -> wrorw #32,
   w...d #0 -> wor wrd, wrn, wrn.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 hold the size/op field; rewrite per size class.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10661 \f
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z": X at bits 16-19, Y at bits 0-3, Z at
   bits 12-15.  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10674
/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  W at bits 5-6, X at
   bits 12-15, Y at bits 16-19, Z at bits 0-3.  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
10686
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register is
   encoded; DSPSC is implicit in the opcode.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
10693
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10713 \f
/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.
   acc0 is implicit; Rm at bits 0-3, Rs at bits 12-15.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10727
/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.
   acc0 is implicit; RdLo at bits 12-15, RdHi at bits 16-19.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10738
10739 /* Xscale move-register-accumulator (argument parse)
10740
10741 MRAcc RdLo,RdHi,acc0. */
10742
10743 static void
10744 do_xsc_mra (void)
10745 {
10746 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10747 inst.instruction |= inst.operands[0].reg << 12;
10748 inst.instruction |= inst.operands[1].reg << 16;
10749 }
10750 \f
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.
   Register shifts are not representable in this format; RRX is encoded
   as ROR with a zero shift amount, and a shift amount of 32 (for ASR
   and LSR) is encoded as 0.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;	/* LSL #0 is the canonical "no shift".  */
      else if (value == 32)
	value = 0;		/* ASR/LSR #32 encode as amount 0.  */

      inst.instruction |= shift << 4;
      /* Shift amount is split: bits 4:2 at insn bits 12-14,
	 bits 1:0 at insn bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10787
10788
10789 /* inst.operands[i] was set up by parse_address. Encode it into a
10790 Thumb32 format load or store instruction. Reject forms that cannot
10791 be used with such instructions. If is_t is true, reject forms that
10792 cannot be used with a T instruction; if is_d is true, reject forms
10793 that cannot be used with a D instruction. If it is a store insn,
10794 reject PC in Rn. */
10795
10796 static void
10797 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
10798 {
10799 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10800
10801 constraint (!inst.operands[i].isreg,
10802 _("Instruction does not support =N addresses"));
10803
10804 inst.instruction |= inst.operands[i].reg << 16;
10805 if (inst.operands[i].immisreg)
10806 {
10807 constraint (is_pc, BAD_PC_ADDRESSING);
10808 constraint (is_t || is_d, _("cannot use register index with this instruction"));
10809 constraint (inst.operands[i].negative,
10810 _("Thumb does not support negative register indexing"));
10811 constraint (inst.operands[i].postind,
10812 _("Thumb does not support register post-indexing"));
10813 constraint (inst.operands[i].writeback,
10814 _("Thumb does not support register indexing with writeback"));
10815 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
10816 _("Thumb supports only LSL in shifted register indexing"));
10817
10818 inst.instruction |= inst.operands[i].imm;
10819 if (inst.operands[i].shifted)
10820 {
10821 constraint (inst.relocs[0].exp.X_op != O_constant,
10822 _("expression too complex"));
10823 constraint (inst.relocs[0].exp.X_add_number < 0
10824 || inst.relocs[0].exp.X_add_number > 3,
10825 _("shift out of range"));
10826 inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
10827 }
10828 inst.relocs[0].type = BFD_RELOC_UNUSED;
10829 }
10830 else if (inst.operands[i].preind)
10831 {
10832 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10833 constraint (is_t && inst.operands[i].writeback,
10834 _("cannot use writeback with this instruction"));
10835 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10836 BAD_PC_ADDRESSING);
10837
10838 if (is_d)
10839 {
10840 inst.instruction |= 0x01000000;
10841 if (inst.operands[i].writeback)
10842 inst.instruction |= 0x00200000;
10843 }
10844 else
10845 {
10846 inst.instruction |= 0x00000c00;
10847 if (inst.operands[i].writeback)
10848 inst.instruction |= 0x00000100;
10849 }
10850 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10851 }
10852 else if (inst.operands[i].postind)
10853 {
10854 gas_assert (inst.operands[i].writeback);
10855 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10856 constraint (is_t, _("cannot use post-indexing with this instruction"));
10857
10858 if (is_d)
10859 inst.instruction |= 0x00200000;
10860 else
10861 inst.instruction |= 0x00000900;
10862 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10863 }
10864 else /* unindexed - only for coprocessor */
10865 inst.error = _("instruction does not accept unindexed addressing");
10866 }
10867
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry is (mnemonic tag, 16-bit opcode, 32-bit opcode);
   the X macro is redefined below to extract each column in turn.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Column 2: the 16-bit opcode for each code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Column 3: the 32-bit opcode; bit 20 of it is the S (flag-setting)
   bit, tested by THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10981
/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW: Rd, Rn plus a 12-bit immediate resolved through the
   T32_IMM12 relocation.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
11004
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.
   Chooses between the 16-bit encodings (with relaxation where
   possible), the 32-bit T2 encodings, ADDW/SUBW, and the special
   SUBS PC, LR, #const form, depending on registers, flag-setting,
   IT-block context and any .w/.n size requirement.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* 16-bit ADDS/SUBS set flags only outside an IT block; plain
	 ADD/SUB only inside one.  */
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;	/* Relax to 32-bit later.  */
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Exception-return form: SUBS PC, LR, #imm8.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.	 */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11228
/* Thumb ADR (argument parse).  Choose between the relaxable 16-bit
   form, the explicit 32-bit form, and the forced 16-bit form,
   depending on syntax mode, size request and destination register.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* If the target symbol is a defined Thumb function, bump the address
     by one so the low (interworking) bit ends up set.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11268
11269 /* Arithmetic instructions for which there is just one 16-bit
11270 instruction encoding, and it allows only two low registers.
11271 For maximal compatibility with ARM syntax, we allow three register
11272 operands even when Thumb-32 instructions are not available, as long
11273 as the first two are identical. For instance, both "sbc r0,r1" and
11274 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Clear bits 25-28 and set bit 28 to select the immediate
	     encoding of the 32-bit opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow encoding sets the flags precisely when outside a
	     predication block, so a flag-setting mnemonic can only be
	     narrow outside one, and a non-flag-setting mnemonic only
	     inside one.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11357
11358 /* Similarly, but for instructions where the arithmetic operation is
11359 commutative, so we can allow either of them to be different from
11360 the destination operand in a 16-bit instruction. For instance, all
11361 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11362 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Clear bits 25-28 and set bit 28 to select the immediate
	     encoding of the 32-bit opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow encoding sets the flags precisely when outside a
	     predication block, hence the inverted test for the
	     flag-setting mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Because the operation is commutative the destination may
	     coincide with either source in the two-operand 16-bit
	     form; swap the sources when Rd matches Rn instead.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11458
/* Thumb BFC (argument parse).  Operand 1 is the LSB and operand 2 the
   width of the bit-field to clear.  */
static void
do_t_bfc (void)
{
  unsigned Rd;
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;
  /* The LSB is split across two fields: bits 2-4 of the LSB go to
     bits 12-14 of the opcode, bits 0-1 to bits 6-7.  */
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
11474
/* Thumb BFI (argument parse).  A "#0" second operand is the
   alternative syntax for BFC.  */
static void
do_t_bfi (void)
{
  int Rd, Rn;
  unsigned int msb;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    Rn = REG_PC;
  else
    {
      Rn = inst.operands[1].reg;
      reject_bad_reg (Rn);
    }

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  /* The LSB is split across two fields of the opcode.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
11504
/* Thumb bit-field extract (argument parse).  Operand 2 is the LSB,
   operand 3 the width; the encoding stores the LSB and width-1.  */
static void
do_t_bfx (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  /* The LSB is split across two fields of the opcode.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}
11524
11525 /* ARM V5 Thumb BLX (argument parse)
11526 BLX <target_addr> which is BLX(1)
11527 BLX <Rm> which is BLX(2)
11528 Unfortunately, there are two different opcodes for this mnemonic.
11529 So, the insns[].value is not used, and the code here zaps values
11530 into inst.instruction.
11531
11532 ??? How to take advantage of the additional two bits of displacement
11533 available in Thumb32 mode? Need new relocation? */
11534
static void
do_t_blx (void)
{
  /* A BLX must be the last instruction of any predication block.  */
  set_pred_insn_type_last ();

  if (inst.operands[0].isreg)
    {
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
      /* We have a register, so this is BLX(2).  */
      inst.instruction |= inst.operands[0].reg << 3;
    }
  else
    {
      /* No register.  This must be BLX(1); replace the opcode
	 wholesale and emit a PC-relative BLX relocation.  */
      inst.instruction = 0xf000e800;
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
    }
}
11553
11554 static void
11555 do_t_branch (void)
11556 {
11557 int opcode;
11558 int cond;
11559 bfd_reloc_code_real_type reloc;
11560
11561 cond = inst.cond;
11562 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
11563
11564 if (in_pred_block ())
11565 {
11566 /* Conditional branches inside IT blocks are encoded as unconditional
11567 branches. */
11568 cond = COND_ALWAYS;
11569 }
11570 else
11571 cond = inst.cond;
11572
11573 if (cond != COND_ALWAYS)
11574 opcode = T_MNEM_bcond;
11575 else
11576 opcode = inst.instruction;
11577
11578 if (unified_syntax
11579 && (inst.size_req == 4
11580 || (inst.size_req != 2
11581 && (inst.operands[0].hasreloc
11582 || inst.relocs[0].exp.X_op == O_constant))))
11583 {
11584 inst.instruction = THUMB_OP32(opcode);
11585 if (cond == COND_ALWAYS)
11586 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11587 else
11588 {
11589 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11590 _("selected architecture does not support "
11591 "wide conditional branch instruction"));
11592
11593 gas_assert (cond != 0xF);
11594 inst.instruction |= cond << 22;
11595 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11596 }
11597 }
11598 else
11599 {
11600 inst.instruction = THUMB_OP16(opcode);
11601 if (cond == COND_ALWAYS)
11602 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11603 else
11604 {
11605 inst.instruction |= cond << 8;
11606 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11607 }
11608 /* Allow section relaxation. */
11609 if (unified_syntax && inst.size_req != 2)
11610 inst.relax = opcode;
11611 }
11612 inst.relocs[0].type = reloc;
11613 inst.relocs[0].pc_rel = 1;
11614 }
11615
11616 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11617 between the two is the maximum immediate allowed - which is passed in
11618 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These instructions execute regardless of any condition, so reject
     an explicit condition code.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_pred_insn_type (NEUTRAL_IT_INSN);
}
11633
/* Thumb HLT: the immediate is limited to 6 bits (0-0x3f).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (0x3f);
}
11639
/* Thumb BKPT: the immediate is limited to 8 bits (0-0xff).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (0xff);
}
11645
/* Thumb branch-and-link (argument parse); uses the BRANCH23
   relocation.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
11673
/* Thumb BX (argument parse).  */
static void
do_t_bx (void)
{
  set_pred_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11683
11684 static void
11685 do_t_bxj (void)
11686 {
11687 int Rm;
11688
11689 set_pred_insn_type_last ();
11690 Rm = inst.operands[0].reg;
11691 reject_bad_reg (Rm);
11692 inst.instruction |= Rm << 16;
11693 }
11694
/* Thumb CLZ (argument parse).  */
static void
do_t_clz (void)
{
  unsigned Rd;
  unsigned Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The source register is encoded twice: in bits 16-19 and again in
     bits 0-3.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
11711
/* Thumb CSDB (argument parse).  There are no operands to encode; just
   record the predication type.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
11717
/* Thumb CPS (argument parse).  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11724
/* Thumb CPSIE/CPSID (argument parse).  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: extract the imod field from the 16-bit template
	 (bits 4-5) and rebuild the wide opcode around it.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11752
11753 /* THUMB CPY instruction (argument parse). */
11754
11755 static void
11756 do_t_cpy (void)
11757 {
11758 if (inst.size_req == 4)
11759 {
11760 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11761 inst.instruction |= inst.operands[0].reg << 8;
11762 inst.instruction |= inst.operands[1].reg;
11763 }
11764 else
11765 {
11766 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11767 inst.instruction |= (inst.operands[0].reg & 0x7);
11768 inst.instruction |= inst.operands[1].reg << 3;
11769 }
11770 }
11771
/* Thumb CBZ/CBNZ (argument parse).  Only a low register is accepted
   and the instruction must sit outside any predication block.  */
static void
do_t_cbz (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
11781
/* Thumb DBG hint (argument parse): encode the option immediate.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11787
11788 static void
11789 do_t_div (void)
11790 {
11791 unsigned Rd, Rn, Rm;
11792
11793 Rd = inst.operands[0].reg;
11794 Rn = (inst.operands[1].present
11795 ? inst.operands[1].reg : Rd);
11796 Rm = inst.operands[2].reg;
11797
11798 reject_bad_reg (Rd);
11799 reject_bad_reg (Rn);
11800 reject_bad_reg (Rm);
11801
11802 inst.instruction |= Rd << 8;
11803 inst.instruction |= Rn << 16;
11804 inst.instruction |= Rm;
11805 }
11806
11807 static void
11808 do_t_hint (void)
11809 {
11810 if (unified_syntax && inst.size_req == 4)
11811 inst.instruction = THUMB_OP32 (inst.instruction);
11812 else
11813 inst.instruction = THUMB_OP16 (inst.instruction);
11814 }
11815
/* Thumb IT (argument parse).  Record the new predication state in
   now_pred and fix up the mask field when the condition is negated.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.
     The block length is given by the position of the terminating 1
     bit in the mask; the bits above it select then/else for each slot
     and must be flipped when the base condition is inverted.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11859
/* MVE VPT (argument parse).  */
static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  /* The mask is split in the opcode: bit 22 supplies bit 3 of the
     mask, bits 13-15 supply bits 0-2.  */
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		  | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = FALSE;
  now_pred.type = VECTOR_PRED;
}
11871
/* Helper function used for both push/pop and ldm/stm.
   DO_IO is TRUE when an actual load/store multiple is being encoded;
   BASE is the base register (must be -1 when DO_IO is FALSE), MASK is
   the register-list bitmask and WRITEBACK selects base writeback.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the opcode distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; it must end any predication
	       block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the sole register into the Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11939
/* Thumb LDM/STM (argument parse).  Attempt narrow (16-bit) encodings
   where possible, falling back to the generic 32-bit multi-register
   encoding.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit STMIA always writes back; 16-bit LDMIA
		 writes back exactly when the base is absent from the
		 register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: convert to PUSH/POP, or a single SP-relative
		 str/ldr when only one register is transferred without
		 writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12068
/* Thumb LDREX (argument parse).  Only a pre-indexed, non-writeback,
   immediate-offset address is permitted.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset is fixed up later as an unsigned 8-bit field.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12084
/* Thumb LDREXD (argument parse).  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      /* When Rt2 is omitted it defaults to Rt+1.  */
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
12102
12103 static void
12104 do_t_ldst (void)
12105 {
12106 unsigned long opcode;
12107 int Rn;
12108
12109 if (inst.operands[0].isreg
12110 && !inst.operands[0].preind
12111 && inst.operands[0].reg == REG_PC)
12112 set_pred_insn_type_last ();
12113
12114 opcode = inst.instruction;
12115 if (unified_syntax)
12116 {
12117 if (!inst.operands[1].isreg)
12118 {
12119 if (opcode <= 0xffff)
12120 inst.instruction = THUMB_OP32 (opcode);
12121 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12122 return;
12123 }
12124 if (inst.operands[1].isreg
12125 && !inst.operands[1].writeback
12126 && !inst.operands[1].shifted && !inst.operands[1].postind
12127 && !inst.operands[1].negative && inst.operands[0].reg <= 7
12128 && opcode <= 0xffff
12129 && inst.size_req != 4)
12130 {
12131 /* Insn may have a 16-bit form. */
12132 Rn = inst.operands[1].reg;
12133 if (inst.operands[1].immisreg)
12134 {
12135 inst.instruction = THUMB_OP16 (opcode);
12136 /* [Rn, Rik] */
12137 if (Rn <= 7 && inst.operands[1].imm <= 7)
12138 goto op16;
12139 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
12140 reject_bad_reg (inst.operands[1].imm);
12141 }
12142 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
12143 && opcode != T_MNEM_ldrsb)
12144 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
12145 || (Rn == REG_SP && opcode == T_MNEM_str))
12146 {
12147 /* [Rn, #const] */
12148 if (Rn > 7)
12149 {
12150 if (Rn == REG_PC)
12151 {
12152 if (inst.relocs[0].pc_rel)
12153 opcode = T_MNEM_ldr_pc2;
12154 else
12155 opcode = T_MNEM_ldr_pc;
12156 }
12157 else
12158 {
12159 if (opcode == T_MNEM_ldr)
12160 opcode = T_MNEM_ldr_sp;
12161 else
12162 opcode = T_MNEM_str_sp;
12163 }
12164 inst.instruction = inst.operands[0].reg << 8;
12165 }
12166 else
12167 {
12168 inst.instruction = inst.operands[0].reg;
12169 inst.instruction |= inst.operands[1].reg << 3;
12170 }
12171 inst.instruction |= THUMB_OP16 (opcode);
12172 if (inst.size_req == 2)
12173 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12174 else
12175 inst.relax = opcode;
12176 return;
12177 }
12178 }
12179 /* Definitely a 32-bit variant. */
12180
12181 /* Warning for Erratum 752419. */
12182 if (opcode == T_MNEM_ldr
12183 && inst.operands[0].reg == REG_SP
12184 && inst.operands[1].writeback == 1
12185 && !inst.operands[1].immisreg)
12186 {
12187 if (no_cpu_selected ()
12188 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
12189 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
12190 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
12191 as_warn (_("This instruction may be unpredictable "
12192 "if executed on M-profile cores "
12193 "with interrupts enabled."));
12194 }
12195
12196 /* Do some validations regarding addressing modes. */
12197 if (inst.operands[1].immisreg)
12198 reject_bad_reg (inst.operands[1].imm);
12199
12200 constraint (inst.operands[1].writeback == 1
12201 && inst.operands[0].reg == inst.operands[1].reg,
12202 BAD_OVERLAP);
12203
12204 inst.instruction = THUMB_OP32 (opcode);
12205 inst.instruction |= inst.operands[0].reg << 12;
12206 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
12207 check_ldr_r15_aligned ();
12208 return;
12209 }
12210
12211 constraint (inst.operands[0].reg > 7, BAD_HIREG);
12212
12213 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
12214 {
12215 /* Only [Rn,Rm] is acceptable. */
12216 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
12217 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
12218 || inst.operands[1].postind || inst.operands[1].shifted
12219 || inst.operands[1].negative,
12220 _("Thumb does not support this addressing mode"));
12221 inst.instruction = THUMB_OP16 (inst.instruction);
12222 goto op16;
12223 }
12224
12225 inst.instruction = THUMB_OP16 (inst.instruction);
12226 if (!inst.operands[1].isreg)
12227 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
12228 return;
12229
12230 constraint (!inst.operands[1].preind
12231 || inst.operands[1].shifted
12232 || inst.operands[1].writeback,
12233 _("Thumb does not support this addressing mode"));
12234 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
12235 {
12236 constraint (inst.instruction & 0x0600,
12237 _("byte or halfword not valid for base register"));
12238 constraint (inst.operands[1].reg == REG_PC
12239 && !(inst.instruction & THUMB_LOAD_BIT),
12240 _("r15 based store not allowed"));
12241 constraint (inst.operands[1].immisreg,
12242 _("invalid base register for register offset"));
12243
12244 if (inst.operands[1].reg == REG_PC)
12245 inst.instruction = T_OPCODE_LDR_PC;
12246 else if (inst.instruction & THUMB_LOAD_BIT)
12247 inst.instruction = T_OPCODE_LDR_SP;
12248 else
12249 inst.instruction = T_OPCODE_STR_SP;
12250
12251 inst.instruction |= inst.operands[0].reg << 8;
12252 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12253 return;
12254 }
12255
12256 constraint (inst.operands[1].reg > 7, BAD_HIREG);
12257 if (!inst.operands[1].immisreg)
12258 {
12259 /* Immediate offset. */
12260 inst.instruction |= inst.operands[0].reg;
12261 inst.instruction |= inst.operands[1].reg << 3;
12262 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12263 return;
12264 }
12265
12266 /* Register offset. */
12267 constraint (inst.operands[1].imm > 7, BAD_HIREG);
12268 constraint (inst.operands[1].negative,
12269 _("Thumb does not support this addressing mode"));
12270
12271 op16:
12272 switch (inst.instruction)
12273 {
12274 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
12275 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
12276 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
12277 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
12278 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
12279 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
12280 case 0x5600 /* ldrsb */:
12281 case 0x5e00 /* ldrsh */: break;
12282 default: abort ();
12283 }
12284
12285 inst.instruction |= inst.operands[0].reg;
12286 inst.instruction |= inst.operands[1].reg << 3;
12287 inst.instruction |= inst.operands[1].imm << 6;
12288 }
12289
12290 static void
12291 do_t_ldstd (void)
12292 {
12293 if (!inst.operands[1].present)
12294 {
12295 inst.operands[1].reg = inst.operands[0].reg + 1;
12296 constraint (inst.operands[0].reg == REG_LR,
12297 _("r14 not allowed here"));
12298 constraint (inst.operands[0].reg == REG_R12,
12299 _("r12 not allowed here"));
12300 }
12301
12302 if (inst.operands[2].writeback
12303 && (inst.operands[0].reg == inst.operands[2].reg
12304 || inst.operands[1].reg == inst.operands[2].reg))
12305 as_warn (_("base register written back, and overlaps "
12306 "one of transfer registers"));
12307
12308 inst.instruction |= inst.operands[0].reg << 12;
12309 inst.instruction |= inst.operands[1].reg << 8;
12310 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
12311 }
12312
/* Encode a Thumb-2 unprivileged ("translate") load/store such as
   LDRT/STRT: Rt goes in bits 12-15, the address comes from operand 1.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12319
12320 static void
12321 do_t_mla (void)
12322 {
12323 unsigned Rd, Rn, Rm, Ra;
12324
12325 Rd = inst.operands[0].reg;
12326 Rn = inst.operands[1].reg;
12327 Rm = inst.operands[2].reg;
12328 Ra = inst.operands[3].reg;
12329
12330 reject_bad_reg (Rd);
12331 reject_bad_reg (Rn);
12332 reject_bad_reg (Rm);
12333 reject_bad_reg (Ra);
12334
12335 inst.instruction |= Rd << 8;
12336 inst.instruction |= Rn << 16;
12337 inst.instruction |= Rm;
12338 inst.instruction |= Ra << 12;
12339 }
12340
12341 static void
12342 do_t_mlal (void)
12343 {
12344 unsigned RdLo, RdHi, Rn, Rm;
12345
12346 RdLo = inst.operands[0].reg;
12347 RdHi = inst.operands[1].reg;
12348 Rn = inst.operands[2].reg;
12349 Rm = inst.operands[3].reg;
12350
12351 reject_bad_reg (RdLo);
12352 reject_bad_reg (RdHi);
12353 reject_bad_reg (Rn);
12354 reject_bad_reg (Rm);
12355
12356 inst.instruction |= RdLo << 12;
12357 inst.instruction |= RdHi << 8;
12358 inst.instruction |= Rn << 16;
12359 inst.instruction |= Rm;
12360 }
12361
/* Encode a Thumb MOV, MOVS or CMP with a register or immediate second
   operand, selecting between 16-bit and 32-bit encodings.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A PC destination must be the last instruction of an IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* First-register field position: bit 8 (Rd) for the MOV forms,
	 bit 16 (Rn) for CMP.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block MOVS must be wide (the 16-bit form sets
	 flags); outside, MOVS can be narrow only with low regs.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* The group-relocation forms only exist as Thumb-1
		 encodings; for anything else either fix the size now
		 or let relaxation pick it later.  */
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-UAL syntax: only the 16-bit encodings are available.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12661
12662 static void
12663 do_t_mov16 (void)
12664 {
12665 unsigned Rd;
12666 bfd_vma imm;
12667 bfd_boolean top;
12668
12669 top = (inst.instruction & 0x00800000) != 0;
12670 if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
12671 {
12672 constraint (top, _(":lower16: not allowed in this instruction"));
12673 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
12674 }
12675 else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
12676 {
12677 constraint (!top, _(":upper16: not allowed in this instruction"));
12678 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
12679 }
12680
12681 Rd = inst.operands[0].reg;
12682 reject_bad_reg (Rd);
12683
12684 inst.instruction |= Rd << 8;
12685 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
12686 {
12687 imm = inst.relocs[0].exp.X_add_number;
12688 inst.instruction |= (imm & 0xf000) << 4;
12689 inst.instruction |= (imm & 0x0800) << 15;
12690 inst.instruction |= (imm & 0x0700) << 4;
12691 inst.instruction |= (imm & 0x00ff);
12692 }
12693 }
12694
/* Encode MVN/MVNS/TST/CMN (register or immediate second operand),
   choosing between 16-bit and 32-bit encodings in unified syntax.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN only forbid PC as the first register; the other mnemonics
     reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* First-register field position: bit 8 (Rd) for the MVN forms,
	 bit 16 (Rn) for the compare/test forms.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit only, unshifted register, low regs.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12774
/* Encode a Thumb MRS instruction (read a PSR, M-profile special
   register, or banked register into Rd).  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Defer to VMRS when the operands name VFP system registers.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parsed value packs the register
	 selector bits; unpack them into the instruction fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
	      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12822
/* Encode a Thumb MSR instruction (write Rn to a PSR, M-profile special
   register, or banked register).  The Thumb encoding only accepts a
   register source, never an immediate.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Defer to VMSR when the operands name VFP system registers.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* A banked-register destination was parsed into a register value;
     otherwise the PSR field mask arrives as an immediate.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the parsed flag bits into the instruction fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12869
12870 static void
12871 do_t_mul (void)
12872 {
12873 bfd_boolean narrow;
12874 unsigned Rd, Rn, Rm;
12875
12876 if (!inst.operands[2].present)
12877 inst.operands[2].reg = inst.operands[0].reg;
12878
12879 Rd = inst.operands[0].reg;
12880 Rn = inst.operands[1].reg;
12881 Rm = inst.operands[2].reg;
12882
12883 if (unified_syntax)
12884 {
12885 if (inst.size_req == 4
12886 || (Rd != Rn
12887 && Rd != Rm)
12888 || Rn > 7
12889 || Rm > 7)
12890 narrow = FALSE;
12891 else if (inst.instruction == T_MNEM_muls)
12892 narrow = !in_pred_block ();
12893 else
12894 narrow = in_pred_block ();
12895 }
12896 else
12897 {
12898 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12899 constraint (Rn > 7 || Rm > 7,
12900 BAD_HIREG);
12901 narrow = TRUE;
12902 }
12903
12904 if (narrow)
12905 {
12906 /* 16-bit MULS/Conditional MUL. */
12907 inst.instruction = THUMB_OP16 (inst.instruction);
12908 inst.instruction |= Rd;
12909
12910 if (Rd == Rn)
12911 inst.instruction |= Rm << 3;
12912 else if (Rd == Rm)
12913 inst.instruction |= Rn << 3;
12914 else
12915 constraint (1, _("dest must overlap one source register"));
12916 }
12917 else
12918 {
12919 constraint (inst.instruction != T_MNEM_mul,
12920 _("Thumb-2 MUL must not set flags"));
12921 /* 32-bit MUL. */
12922 inst.instruction = THUMB_OP32 (inst.instruction);
12923 inst.instruction |= Rd << 8;
12924 inst.instruction |= Rn << 16;
12925 inst.instruction |= Rm << 0;
12926
12927 reject_bad_reg (Rd);
12928 reject_bad_reg (Rn);
12929 reject_bad_reg (Rm);
12930 }
12931 }
12932
12933 static void
12934 do_t_mull (void)
12935 {
12936 unsigned RdLo, RdHi, Rn, Rm;
12937
12938 RdLo = inst.operands[0].reg;
12939 RdHi = inst.operands[1].reg;
12940 Rn = inst.operands[2].reg;
12941 Rm = inst.operands[3].reg;
12942
12943 reject_bad_reg (RdLo);
12944 reject_bad_reg (RdHi);
12945 reject_bad_reg (Rn);
12946 reject_bad_reg (Rm);
12947
12948 inst.instruction |= RdLo << 12;
12949 inst.instruction |= RdHi << 8;
12950 inst.instruction |= Rn << 16;
12951 inst.instruction |= Rm;
12952
12953 if (RdLo == RdHi)
12954 as_tsktsk (_("rdhi and rdlo must be different"));
12955 }
12956
12957 static void
12958 do_t_nop (void)
12959 {
12960 set_pred_insn_type (NEUTRAL_IT_INSN);
12961
12962 if (unified_syntax)
12963 {
12964 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12965 {
12966 inst.instruction = THUMB_OP32 (inst.instruction);
12967 inst.instruction |= inst.operands[0].imm;
12968 }
12969 else
12970 {
12971 /* PR9722: Check for Thumb2 availability before
12972 generating a thumb2 nop instruction. */
12973 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12974 {
12975 inst.instruction = THUMB_OP16 (inst.instruction);
12976 inst.instruction |= inst.operands[0].imm << 4;
12977 }
12978 else
12979 inst.instruction = 0x46c0;
12980 }
12981 }
12982 else
12983 {
12984 constraint (inst.operands[0].present,
12985 _("Thumb does not support NOP with hints"));
12986 inst.instruction = 0x46c0;
12987 }
12988 }
12989
12990 static void
12991 do_t_neg (void)
12992 {
12993 if (unified_syntax)
12994 {
12995 bfd_boolean narrow;
12996
12997 if (THUMB_SETS_FLAGS (inst.instruction))
12998 narrow = !in_pred_block ();
12999 else
13000 narrow = in_pred_block ();
13001 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13002 narrow = FALSE;
13003 if (inst.size_req == 4)
13004 narrow = FALSE;
13005
13006 if (!narrow)
13007 {
13008 inst.instruction = THUMB_OP32 (inst.instruction);
13009 inst.instruction |= inst.operands[0].reg << 8;
13010 inst.instruction |= inst.operands[1].reg << 16;
13011 }
13012 else
13013 {
13014 inst.instruction = THUMB_OP16 (inst.instruction);
13015 inst.instruction |= inst.operands[0].reg;
13016 inst.instruction |= inst.operands[1].reg << 3;
13017 }
13018 }
13019 else
13020 {
13021 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13022 BAD_HIREG);
13023 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13024
13025 inst.instruction = THUMB_OP16 (inst.instruction);
13026 inst.instruction |= inst.operands[0].reg;
13027 inst.instruction |= inst.operands[1].reg << 3;
13028 }
13029 }
13030
13031 static void
13032 do_t_orn (void)
13033 {
13034 unsigned Rd, Rn;
13035
13036 Rd = inst.operands[0].reg;
13037 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
13038
13039 reject_bad_reg (Rd);
13040 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
13041 reject_bad_reg (Rn);
13042
13043 inst.instruction |= Rd << 8;
13044 inst.instruction |= Rn << 16;
13045
13046 if (!inst.operands[2].isreg)
13047 {
13048 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13049 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13050 }
13051 else
13052 {
13053 unsigned Rm;
13054
13055 Rm = inst.operands[2].reg;
13056 reject_bad_reg (Rm);
13057
13058 constraint (inst.operands[2].shifted
13059 && inst.operands[2].immisreg,
13060 _("shift must be constant"));
13061 encode_thumb32_shifted_operand (2);
13062 }
13063 }
13064
13065 static void
13066 do_t_pkhbt (void)
13067 {
13068 unsigned Rd, Rn, Rm;
13069
13070 Rd = inst.operands[0].reg;
13071 Rn = inst.operands[1].reg;
13072 Rm = inst.operands[2].reg;
13073
13074 reject_bad_reg (Rd);
13075 reject_bad_reg (Rn);
13076 reject_bad_reg (Rm);
13077
13078 inst.instruction |= Rd << 8;
13079 inst.instruction |= Rn << 16;
13080 inst.instruction |= Rm;
13081 if (inst.operands[3].present)
13082 {
13083 unsigned int val = inst.relocs[0].exp.X_add_number;
13084 constraint (inst.relocs[0].exp.X_op != O_constant,
13085 _("expression too complex"));
13086 inst.instruction |= (val & 0x1c) << 10;
13087 inst.instruction |= (val & 0x03) << 6;
13088 }
13089 }
13090
13091 static void
13092 do_t_pkhtb (void)
13093 {
13094 if (!inst.operands[3].present)
13095 {
13096 unsigned Rtmp;
13097
13098 inst.instruction &= ~0x00000020;
13099
13100 /* PR 10168. Swap the Rm and Rn registers. */
13101 Rtmp = inst.operands[1].reg;
13102 inst.operands[1].reg = inst.operands[2].reg;
13103 inst.operands[2].reg = Rtmp;
13104 }
13105 do_t_pkhbt ();
13106 }
13107
/* Encode a Thumb-2 preload (PLD-family) instruction.  A register
   offset must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13116
/* Encode Thumb PUSH/POP.  Prefer the 16-bit encoding: a pure low-reg
   mask uses the base form, a mask with just LR (push) or PC (pop)
   extra uses the PC/LR bit, and anything else needs Thumb-2.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* Base register is SP (13) with writeback.  */
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13149
/* Encode CLRM (clear register list).  Only available in unified
   syntax; no base register is involved, hence -1.  */
static void
do_t_clrm (void)
{
  if (unified_syntax)
    encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13161
13162 static void
13163 do_t_vscclrm (void)
13164 {
13165 if (inst.operands[0].issingle)
13166 {
13167 inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
13168 inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
13169 inst.instruction |= inst.operands[0].imm;
13170 }
13171 else
13172 {
13173 inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
13174 inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
13175 inst.instruction |= 1 << 8;
13176 inst.instruction |= inst.operands[0].imm << 1;
13177 }
13178 }
13179
13180 static void
13181 do_t_rbit (void)
13182 {
13183 unsigned Rd, Rm;
13184
13185 Rd = inst.operands[0].reg;
13186 Rm = inst.operands[1].reg;
13187
13188 reject_bad_reg (Rd);
13189 reject_bad_reg (Rm);
13190
13191 inst.instruction |= Rd << 8;
13192 inst.instruction |= Rm << 16;
13193 inst.instruction |= Rm;
13194 }
13195
13196 static void
13197 do_t_rev (void)
13198 {
13199 unsigned Rd, Rm;
13200
13201 Rd = inst.operands[0].reg;
13202 Rm = inst.operands[1].reg;
13203
13204 reject_bad_reg (Rd);
13205 reject_bad_reg (Rm);
13206
13207 if (Rd <= 7 && Rm <= 7
13208 && inst.size_req != 4)
13209 {
13210 inst.instruction = THUMB_OP16 (inst.instruction);
13211 inst.instruction |= Rd;
13212 inst.instruction |= Rm << 3;
13213 }
13214 else if (unified_syntax)
13215 {
13216 inst.instruction = THUMB_OP32 (inst.instruction);
13217 inst.instruction |= Rd << 8;
13218 inst.instruction |= Rm << 16;
13219 inst.instruction |= Rm;
13220 }
13221 else
13222 inst.error = BAD_HIREG;
13223 }
13224
13225 static void
13226 do_t_rrx (void)
13227 {
13228 unsigned Rd, Rm;
13229
13230 Rd = inst.operands[0].reg;
13231 Rm = inst.operands[1].reg;
13232
13233 reject_bad_reg (Rd);
13234 reject_bad_reg (Rm);
13235
13236 inst.instruction |= Rd << 8;
13237 inst.instruction |= Rm;
13238 }
13239
/* Encode Thumb RSB/RSBS.  An immediate of zero may narrow to the
   16-bit NEG encoding; anything else uses a 32-bit encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the flag-setting (S) bit of the 32-bit template.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Narrowing is only possible for a literal zero immediate.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13294
/* Encode Thumb SETEND.  Deprecated from ARMv8, and not permitted
   inside an IT block.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* Bit 3 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
13306
static void
do_t_shift (void)
{
  /* Encode the Thumb shift instructions asr, lsl, lsr and ror, in both
     immediate-shift and register-shift forms, selecting a 16-bit
     encoding where the operands and predication state allow it.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* A flag-setting form can only be narrow outside a predicated
	 block; a non-flag-setting form only inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      /* The 16-bit encodings only reach r0-r7.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate-rotate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      /* An explicit .w suffix forces the 32-bit encoding.  */
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate-shift wide form is encoded as MOV(S) Rd, Rm,
		 <shift> #imm.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift form.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate-shift form; ROR was excluded above.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit encodings and low registers
	 are available, and the mnemonics never carry an 's'.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13454
13455 static void
13456 do_t_simd (void)
13457 {
13458 unsigned Rd, Rn, Rm;
13459
13460 Rd = inst.operands[0].reg;
13461 Rn = inst.operands[1].reg;
13462 Rm = inst.operands[2].reg;
13463
13464 reject_bad_reg (Rd);
13465 reject_bad_reg (Rn);
13466 reject_bad_reg (Rm);
13467
13468 inst.instruction |= Rd << 8;
13469 inst.instruction |= Rn << 16;
13470 inst.instruction |= Rm;
13471 }
13472
13473 static void
13474 do_t_simd2 (void)
13475 {
13476 unsigned Rd, Rn, Rm;
13477
13478 Rd = inst.operands[0].reg;
13479 Rm = inst.operands[1].reg;
13480 Rn = inst.operands[2].reg;
13481
13482 reject_bad_reg (Rd);
13483 reject_bad_reg (Rn);
13484 reject_bad_reg (Rm);
13485
13486 inst.instruction |= Rd << 8;
13487 inst.instruction |= Rn << 16;
13488 inst.instruction |= Rm;
13489 }
13490
13491 static void
13492 do_t_smc (void)
13493 {
13494 unsigned int value = inst.relocs[0].exp.X_add_number;
13495 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
13496 _("SMC is not permitted on this architecture"));
13497 constraint (inst.relocs[0].exp.X_op != O_constant,
13498 _("expression too complex"));
13499 inst.relocs[0].type = BFD_RELOC_UNUSED;
13500 inst.instruction |= (value & 0xf000) >> 12;
13501 inst.instruction |= (value & 0x0ff0);
13502 inst.instruction |= (value & 0x000f) << 16;
13503 /* PR gas/15623: SMC instructions must be last in an IT block. */
13504 set_pred_insn_type_last ();
13505 }
13506
13507 static void
13508 do_t_hvc (void)
13509 {
13510 unsigned int value = inst.relocs[0].exp.X_add_number;
13511
13512 inst.relocs[0].type = BFD_RELOC_UNUSED;
13513 inst.instruction |= (value & 0x0fff);
13514 inst.instruction |= (value & 0xf000) << 4;
13515 }
13516
/* Shared encoder for Thumb SSAT and USAT.  BIAS is the amount by
   which the saturate-position operand is biased in the source
   relative to the encoding: 1 for SSAT, 0 for USAT.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift applied to Rn before saturating.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* The shift amount is encoded directly; no fixup is needed.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* An ASR shift sets the sh bit; otherwise LSL is encoded.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount is split: bits [4:2] at bit 12, bits [1:0]
	     at bit 6.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13554
/* Encode SSAT: the saturate position is written 1-based in the
   source but encoded 0-based, hence the bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13560
13561 static void
13562 do_t_ssat16 (void)
13563 {
13564 unsigned Rd, Rn;
13565
13566 Rd = inst.operands[0].reg;
13567 Rn = inst.operands[2].reg;
13568
13569 reject_bad_reg (Rd);
13570 reject_bad_reg (Rn);
13571
13572 inst.instruction |= Rd << 8;
13573 inst.instruction |= inst.operands[1].imm - 1;
13574 inst.instruction |= Rn << 16;
13575 }
13576
static void
do_t_strex (void)
{
  /* STREX Rd, Rt, [Rn {, #imm}].  The address operand must be a
     plain base register with an optional immediate offset: no
     post-index, writeback, register offset, shift or negative
     offset is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset immediate is applied later as an unsigned 8-bit
     fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13593
13594 static void
13595 do_t_strexd (void)
13596 {
13597 if (!inst.operands[2].present)
13598 inst.operands[2].reg = inst.operands[1].reg + 1;
13599
13600 constraint (inst.operands[0].reg == inst.operands[1].reg
13601 || inst.operands[0].reg == inst.operands[2].reg
13602 || inst.operands[0].reg == inst.operands[3].reg,
13603 BAD_OVERLAP);
13604
13605 inst.instruction |= inst.operands[0].reg;
13606 inst.instruction |= inst.operands[1].reg << 12;
13607 inst.instruction |= inst.operands[2].reg << 8;
13608 inst.instruction |= inst.operands[3].reg << 16;
13609 }
13610
13611 static void
13612 do_t_sxtah (void)
13613 {
13614 unsigned Rd, Rn, Rm;
13615
13616 Rd = inst.operands[0].reg;
13617 Rn = inst.operands[1].reg;
13618 Rm = inst.operands[2].reg;
13619
13620 reject_bad_reg (Rd);
13621 reject_bad_reg (Rn);
13622 reject_bad_reg (Rm);
13623
13624 inst.instruction |= Rd << 8;
13625 inst.instruction |= Rn << 16;
13626 inst.instruction |= Rm;
13627 inst.instruction |= inst.operands[3].imm << 4;
13628 }
13629
static void
do_t_sxth (void)
{
  /* Encode the Thumb extend instructions (sxth and friends).  The
     16-bit encoding is used when both registers are low, no rotation
     is requested, and no .w suffix forces a 32-bit encoding.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means we still hold the generic
     T_MNEM value rather than a widened opcode.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation field at bit 4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-UAL syntax only has the 16-bit form, which supports
	 neither rotation nor high registers.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13665
static void
do_t_swi (void)
{
  /* SWI/SVC: the immediate is handled entirely by the fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13671
static void
do_t_tb (void)
{
  /* TBB/TBH [Rn, Rm{, LSL #1}].  Bit 4 of the opcode distinguishes
     the halfword form (tbh) from the byte form (tbb).  */
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Branch-like: must be the last instruction in a predicated
     block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* From ARMv8 on, SP is accepted as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only the halfword form takes the LSL #1 shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13694
static void
do_t_udf (void)
{
  /* UDF {#imm}: permanently undefined instruction.  The immediate
     defaults to zero; a value over 255 or a .w suffix selects the
     32-bit encoding.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* The 16-bit immediate is split: top four bits at bit 16, low
	 twelve bits at bit 0.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* NOTE(review): marked IT-neutral — presumably valid regardless of
     surrounding IT state; confirm against set_pred_insn_type.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
13717
13718
/* Encode USAT: the saturate position is encoded exactly as written
   (no bias, unlike SSAT).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13724
13725 static void
13726 do_t_usat16 (void)
13727 {
13728 unsigned Rd, Rn;
13729
13730 Rd = inst.operands[0].reg;
13731 Rn = inst.operands[2].reg;
13732
13733 reject_bad_reg (Rd);
13734 reject_bad_reg (Rn);
13735
13736 inst.instruction |= Rd << 8;
13737 inst.instruction |= inst.operands[1].imm;
13738 inst.instruction |= Rn << 16;
13739 }
13740
13741 /* Checking the range of the branch offset (VAL) with NBITS bits
13742 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13743 static int
13744 v8_1_branch_value_check (int val, int nbits, int is_signed)
13745 {
13746 gas_assert (nbits > 0 && nbits <= 32);
13747 if (is_signed)
13748 {
13749 int cmp = (1 << (nbits - 1));
13750 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13751 return FAIL;
13752 }
13753 else
13754 {
13755 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13756 return FAIL;
13757 }
13758 return SUCCESS;
13759 }
13760
/* For branches in Armv8.1-M Mainline (bf, bfl, bfcsel, bfx, bflx).  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0 is the branch-future point, a 5-bit forward offset.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Bits [4:1] of the offset go at bit 23 (the LSB is always
	 zero and is not encoded).  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Symbolic offset: defer to a 5-bit PC-relative relocation.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Split the 17-bit branch offset into the immA (bit 16),
	     immB (bit 1) and immC (bit 11) fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Same field layout as bf, but with a 19-bit offset.  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  Operands 0 and 2 must either both be constant or
	 both be symbolic; a mixture is rejected.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  /* The else-target must lie 2 or 4 bytes past the
	     branch-future point; a distance of 4 sets the T bit.  */
	  if (diff == 4)
	    inst.instruction |= 1 << 17;  /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the select condition, at bit 18; the instruction
	 itself must not carry a condition.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register-target forms: the branch register goes at bit 16.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13869
/* Helper function for do_t_loloop to handle relocations.  IS_LE is
   non-zero for the LE (loop-end) instruction, whose offset is
   negated before encoding since it branches backwards.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      /* Constant offset: range-check and encode it directly.  */
      int value = inst.relocs[0].exp.X_add_number;
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the offset: bits [11:2] go at bit 1, bit 1 goes at
	 bit 11 (bit 0 is always zero and is not encoded).  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      /* Symbolic offset: defer to a PC-relative 12-bit loop
	 relocation.  */
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
13895
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: dls, wls and le.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  /* These are branch-like and not allowed inside a predicated
     block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>.  */
      /* NOTE(review): omitting the operand sets bit 21 — presumably
	 selecting the LR-less variant; confirm against the Armv8.1-M
	 encoding tables.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      /* wls additionally encodes the loop-end offset.  */
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      /* dls/wls take the iteration count in a general register,
	 placed at bit 16.  */
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13927
13928 /* MVE instruction encoder helpers. */
13929 #define M_MNEM_vabav 0xee800f01
13930 #define M_MNEM_vmladav 0xeef00e00
13931 #define M_MNEM_vmladava 0xeef00e20
13932 #define M_MNEM_vmladavx 0xeef01e00
13933 #define M_MNEM_vmladavax 0xeef01e20
13934 #define M_MNEM_vmlsdav 0xeef00e01
13935 #define M_MNEM_vmlsdava 0xeef00e21
13936 #define M_MNEM_vmlsdavx 0xeef01e01
13937 #define M_MNEM_vmlsdavax 0xeef01e21
13938 #define M_MNEM_vmullt 0xee011e00
13939 #define M_MNEM_vmullb 0xee010e00
13940 #define M_MNEM_vst20 0xfc801e00
13941 #define M_MNEM_vst21 0xfc801e20
13942 #define M_MNEM_vst40 0xfc801e01
13943 #define M_MNEM_vst41 0xfc801e21
13944 #define M_MNEM_vst42 0xfc801e41
13945 #define M_MNEM_vst43 0xfc801e61
13946 #define M_MNEM_vld20 0xfc901e00
13947 #define M_MNEM_vld21 0xfc901e20
13948 #define M_MNEM_vld40 0xfc901e01
13949 #define M_MNEM_vld41 0xfc901e21
13950 #define M_MNEM_vld42 0xfc901e41
13951 #define M_MNEM_vld43 0xfc901e61
13952 #define M_MNEM_vstrb 0xec000e00
13953 #define M_MNEM_vstrh 0xec000e10
13954 #define M_MNEM_vstrw 0xec000e40
13955 #define M_MNEM_vstrd 0xec000e50
13956 #define M_MNEM_vldrb 0xec100e00
13957 #define M_MNEM_vldrh 0xec100e10
13958 #define M_MNEM_vldrw 0xec100e40
13959 #define M_MNEM_vldrd 0xec100e50
13960
13961 /* Neon instruction encoder helpers. */
13962
13963 /* Encodings for the different types for various Neon opcodes. */
13964
13965 /* An "invalid" code for the following tables. */
13966 #define N_INV -1u
13967
/* One row of NEON_ENC_TAB below: the alternative encodings of an
   overloaded Neon mnemonic, selected via the NEON_ENCODE macro.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or interleave/ARM-reg) variant.  */
  unsigned float_or_poly;	/* Float or polynomial (or lane) variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate (or dup) variant.  */
};
13974
13975 /* Map overloaded Neon opcodes to their respective encodings. */
13976 #define NEON_ENC_TAB \
13977 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13978 X(vabdl, 0x0800700, N_INV, N_INV), \
13979 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13980 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13981 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13982 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13983 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13984 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13985 X(vaddl, 0x0800000, N_INV, N_INV), \
13986 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13987 X(vsubl, 0x0800200, N_INV, N_INV), \
13988 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13989 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13990 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13991 /* Register variants of the following two instructions are encoded as
13992 vcge / vcgt with the operands reversed. */ \
13993 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13994 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13995 X(vfma, N_INV, 0x0000c10, N_INV), \
13996 X(vfms, N_INV, 0x0200c10, N_INV), \
13997 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13998 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13999 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
14000 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
14001 X(vmlal, 0x0800800, N_INV, 0x0800240), \
14002 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
14003 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
14004 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
14005 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
14006 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
14007 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
14008 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
14009 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
14010 X(vshl, 0x0000400, N_INV, 0x0800510), \
14011 X(vqshl, 0x0000410, N_INV, 0x0800710), \
14012 X(vand, 0x0000110, N_INV, 0x0800030), \
14013 X(vbic, 0x0100110, N_INV, 0x0800030), \
14014 X(veor, 0x1000110, N_INV, N_INV), \
14015 X(vorn, 0x0300110, N_INV, 0x0800010), \
14016 X(vorr, 0x0200110, N_INV, 0x0800010), \
14017 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
14018 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
14019 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
14020 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
14021 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
14022 X(vst1, 0x0000000, 0x0800000, N_INV), \
14023 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
14024 X(vst2, 0x0000100, 0x0800100, N_INV), \
14025 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
14026 X(vst3, 0x0000200, 0x0800200, N_INV), \
14027 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
14028 X(vst4, 0x0000300, 0x0800300, N_INV), \
14029 X(vmovn, 0x1b20200, N_INV, N_INV), \
14030 X(vtrn, 0x1b20080, N_INV, N_INV), \
14031 X(vqmovn, 0x1b20200, N_INV, N_INV), \
14032 X(vqmovun, 0x1b20240, N_INV, N_INV), \
14033 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
14034 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
14035 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
14036 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
14037 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
14038 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
14039 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
14040 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
14041 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
14042 X(vseleq, 0xe000a00, N_INV, N_INV), \
14043 X(vselvs, 0xe100a00, N_INV, N_INV), \
14044 X(vselge, 0xe200a00, N_INV, N_INV), \
14045 X(vselgt, 0xe300a00, N_INV, N_INV), \
14046 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
14047 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
14048 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
14049 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
14050 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
14051 X(aes, 0x3b00300, N_INV, N_INV), \
14052 X(sha3op, 0x2000c00, N_INV, N_INV), \
14053 X(sha1h, 0x3b902c0, N_INV, N_INV), \
14054 X(sha2op, 0x3ba0380, N_INV, N_INV)
14055
14056 enum neon_opc
14057 {
14058 #define X(OPC,I,F,S) N_MNEM_##OPC
14059 NEON_ENC_TAB
14060 #undef X
14061 };
14062
14063 static const struct neon_tab_entry neon_enc_tab[] =
14064 {
14065 #define X(OPC,I,F,S) { (I), (F), (S) }
14066 NEON_ENC_TAB
14067 #undef X
14068 };
14069
14070 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
14071 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14072 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14073 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14074 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14075 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14076 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14077 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14078 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14079 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14080 #define NEON_ENC_SINGLE_(X) \
14081 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
14082 #define NEON_ENC_DOUBLE_(X) \
14083 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
14084 #define NEON_ENC_FPV8_(X) \
14085 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14086
14087 #define NEON_ENCODE(type, inst) \
14088 do \
14089 { \
14090 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
14091 inst.is_neon = 1; \
14092 } \
14093 while (0)
14094
14095 #define check_neon_suffixes \
14096 do \
14097 { \
14098 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
14099 { \
14100 as_bad (_("invalid neon suffix for non neon instruction")); \
14101 return; \
14102 } \
14103 } \
14104 while (0)
14105
14106 /* Define shapes for instruction operands. The following mnemonic characters
14107 are used in this table:
14108
14109 F - VFP S<n> register
14110 D - Neon D<n> register
14111 Q - Neon Q<n> register
14112 I - Immediate
14113 S - Scalar
14114 R - ARM register
14115 L - D<n> register list
14116
14117 This table is used to generate various data:
14118 - enumerations of the form NS_DDR to be used as arguments to
14119 neon_select_shape.
14120 - a table classifying shapes into single, double, quad, mixed.
14121 - a table used to drive neon_select_shape. */
14122
14123 #define NEON_SHAPE_DEF \
14124 X(3, (R, Q, Q), QUAD), \
14125 X(3, (D, D, D), DOUBLE), \
14126 X(3, (Q, Q, Q), QUAD), \
14127 X(3, (D, D, I), DOUBLE), \
14128 X(3, (Q, Q, I), QUAD), \
14129 X(3, (D, D, S), DOUBLE), \
14130 X(3, (Q, Q, S), QUAD), \
14131 X(3, (Q, Q, R), QUAD), \
14132 X(2, (D, D), DOUBLE), \
14133 X(2, (Q, Q), QUAD), \
14134 X(2, (D, S), DOUBLE), \
14135 X(2, (Q, S), QUAD), \
14136 X(2, (D, R), DOUBLE), \
14137 X(2, (Q, R), QUAD), \
14138 X(2, (D, I), DOUBLE), \
14139 X(2, (Q, I), QUAD), \
14140 X(3, (D, L, D), DOUBLE), \
14141 X(2, (D, Q), MIXED), \
14142 X(2, (Q, D), MIXED), \
14143 X(3, (D, Q, I), MIXED), \
14144 X(3, (Q, D, I), MIXED), \
14145 X(3, (Q, D, D), MIXED), \
14146 X(3, (D, Q, Q), MIXED), \
14147 X(3, (Q, Q, D), MIXED), \
14148 X(3, (Q, D, S), MIXED), \
14149 X(3, (D, Q, S), MIXED), \
14150 X(4, (D, D, D, I), DOUBLE), \
14151 X(4, (Q, Q, Q, I), QUAD), \
14152 X(4, (D, D, S, I), DOUBLE), \
14153 X(4, (Q, Q, S, I), QUAD), \
14154 X(2, (F, F), SINGLE), \
14155 X(3, (F, F, F), SINGLE), \
14156 X(2, (F, I), SINGLE), \
14157 X(2, (F, D), MIXED), \
14158 X(2, (D, F), MIXED), \
14159 X(3, (F, F, I), MIXED), \
14160 X(4, (R, R, F, F), SINGLE), \
14161 X(4, (F, F, R, R), SINGLE), \
14162 X(3, (D, R, R), DOUBLE), \
14163 X(3, (R, R, D), DOUBLE), \
14164 X(2, (S, R), SINGLE), \
14165 X(2, (R, S), SINGLE), \
14166 X(2, (F, R), SINGLE), \
14167 X(2, (R, F), SINGLE), \
14168 /* Half float shape supported so far. */\
14169 X (2, (H, D), MIXED), \
14170 X (2, (D, H), MIXED), \
14171 X (2, (H, F), MIXED), \
14172 X (2, (F, H), MIXED), \
14173 X (2, (H, H), HALF), \
14174 X (2, (H, R), HALF), \
14175 X (2, (R, H), HALF), \
14176 X (2, (H, I), HALF), \
14177 X (3, (H, H, H), HALF), \
14178 X (3, (H, F, I), MIXED), \
14179 X (3, (F, H, I), MIXED), \
14180 X (3, (D, H, H), MIXED), \
14181 X (3, (D, H, S), MIXED)
14182
14183 #define S2(A,B) NS_##A##B
14184 #define S3(A,B,C) NS_##A##B##C
14185 #define S4(A,B,C,D) NS_##A##B##C##D
14186
14187 #define X(N, L, C) S##N L
14188
14189 enum neon_shape
14190 {
14191 NEON_SHAPE_DEF,
14192 NS_NULL
14193 };
14194
14195 #undef X
14196 #undef S2
14197 #undef S3
14198 #undef S4
14199
14200 enum neon_shape_class
14201 {
14202 SC_HALF,
14203 SC_SINGLE,
14204 SC_DOUBLE,
14205 SC_QUAD,
14206 SC_MIXED
14207 };
14208
14209 #define X(N, L, C) SC_##C
14210
14211 static enum neon_shape_class neon_shape_class[] =
14212 {
14213 NEON_SHAPE_DEF
14214 };
14215
14216 #undef X
14217
14218 enum neon_shape_el
14219 {
14220 SE_H,
14221 SE_F,
14222 SE_D,
14223 SE_Q,
14224 SE_I,
14225 SE_S,
14226 SE_R,
14227 SE_L
14228 };
14229
14230 /* Register widths of above. */
14231 static unsigned neon_shape_el_size[] =
14232 {
14233 16,
14234 32,
14235 64,
14236 128,
14237 0,
14238 32,
14239 32,
14240 0
14241 };
14242
14243 struct neon_shape_info
14244 {
14245 unsigned els;
14246 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
14247 };
14248
14249 #define S2(A,B) { SE_##A, SE_##B }
14250 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14251 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14252
14253 #define X(N, L, C) { N, S##N L }
14254
14255 static struct neon_shape_info neon_shape_tab[] =
14256 {
14257 NEON_SHAPE_DEF
14258 };
14259
14260 #undef X
14261 #undef S2
14262 #undef S3
14263 #undef S4
14264
14265 /* Bit masks used in type checking given instructions.
14266 'N_EQK' means the type must be the same as (or based on in some way) the key
14267 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14268 set, various other bits can be set as well in order to modify the meaning of
14269 the type constraint. */
14270
14271 enum neon_type_mask
14272 {
14273 N_S8 = 0x0000001,
14274 N_S16 = 0x0000002,
14275 N_S32 = 0x0000004,
14276 N_S64 = 0x0000008,
14277 N_U8 = 0x0000010,
14278 N_U16 = 0x0000020,
14279 N_U32 = 0x0000040,
14280 N_U64 = 0x0000080,
14281 N_I8 = 0x0000100,
14282 N_I16 = 0x0000200,
14283 N_I32 = 0x0000400,
14284 N_I64 = 0x0000800,
14285 N_8 = 0x0001000,
14286 N_16 = 0x0002000,
14287 N_32 = 0x0004000,
14288 N_64 = 0x0008000,
14289 N_P8 = 0x0010000,
14290 N_P16 = 0x0020000,
14291 N_F16 = 0x0040000,
14292 N_F32 = 0x0080000,
14293 N_F64 = 0x0100000,
14294 N_P64 = 0x0200000,
14295 N_KEY = 0x1000000, /* Key element (main type specifier). */
14296 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
14297 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
14298 N_UNT = 0x8000000, /* Must be explicitly untyped. */
14299 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
14300 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
14301 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14302 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14303 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14304 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
14305 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14306 N_UTYP = 0,
14307 N_MAX_NONSPECIAL = N_P64
14308 };
14309
14310 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14311
14312 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14313 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14314 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14315 #define N_S_32 (N_S8 | N_S16 | N_S32)
14316 #define N_F_16_32 (N_F16 | N_F32)
14317 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14318 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14319 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14320 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14321 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14322 #define N_F_MVE (N_F16 | N_F32)
14323 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14324
14325 /* Pass this as the first type argument to neon_check_type to ignore types
14326 altogether. */
14327 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14328
14329 /* Select a "shape" for the current instruction (describing register types or
14330 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14331 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14332 function of operand parsing, so this function doesn't need to be called.
14333 Shapes should be listed in order of decreasing length. */
14334
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated candidate list and return the first shape
     whose per-operand element kinds all match the parsed operands.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* Fewer operands were parsed than this shape requires.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a .f16, .16, .u16, .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype. If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	      /* Single-precision VFP register (Sn), either untyped or with a
		 32-bit type specifier.  */
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	      /* Double-precision/Neon D register.  */
	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	      /* ARM core (general-purpose) register.  */
	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	      /* Neon quadword Q register.  */
	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	      /* Immediate operand (neither register nor scalar).  */
	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* Scalar operand, e.g. d0[2].  */
	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* SE_L operands get no per-operand validation here.  */
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Report only if a real candidate list was supplied; NS_NULL as the first
     argument means the caller did not expect a match.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14471
14472 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14473 means the Q bit should be set). */
14474
14475 static int
14476 neon_quad (enum neon_shape shape)
14477 {
14478 return neon_shape_class[shape] == SC_QUAD;
14479 }
14480
14481 static void
14482 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14483 unsigned *g_size)
14484 {
14485 /* Allow modification to be made to types which are constrained to be
14486 based on the key element, based on bits set alongside N_EQK. */
14487 if ((typebits & N_EQK) != 0)
14488 {
14489 if ((typebits & N_HLF) != 0)
14490 *g_size /= 2;
14491 else if ((typebits & N_DBL) != 0)
14492 *g_size *= 2;
14493 if ((typebits & N_SGN) != 0)
14494 *g_type = NT_signed;
14495 else if ((typebits & N_UNS) != 0)
14496 *g_type = NT_unsigned;
14497 else if ((typebits & N_INT) != 0)
14498 *g_type = NT_integer;
14499 else if ((typebits & N_FLT) != 0)
14500 *g_type = NT_float;
14501 else if ((typebits & N_SIZ) != 0)
14502 *g_type = NT_untyped;
14503 }
14504 }
14505
14506 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14507 operand type, i.e. the single type specified in a Neon instruction when it
14508 is the only one given. */
14509
14510 static struct neon_type_el
14511 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
14512 {
14513 struct neon_type_el dest = *key;
14514
14515 gas_assert ((thisarg & N_EQK) != 0);
14516
14517 neon_modify_type_size (thisarg, &dest.type, &dest.size);
14518
14519 return dest;
14520 }
14521
14522 /* Convert Neon type and size into compact bitmask representation. */
14523
14524 static enum neon_type_mask
14525 type_chk_of_el_type (enum neon_el_type type, unsigned size)
14526 {
14527 switch (type)
14528 {
14529 case NT_untyped:
14530 switch (size)
14531 {
14532 case 8: return N_8;
14533 case 16: return N_16;
14534 case 32: return N_32;
14535 case 64: return N_64;
14536 default: ;
14537 }
14538 break;
14539
14540 case NT_integer:
14541 switch (size)
14542 {
14543 case 8: return N_I8;
14544 case 16: return N_I16;
14545 case 32: return N_I32;
14546 case 64: return N_I64;
14547 default: ;
14548 }
14549 break;
14550
14551 case NT_float:
14552 switch (size)
14553 {
14554 case 16: return N_F16;
14555 case 32: return N_F32;
14556 case 64: return N_F64;
14557 default: ;
14558 }
14559 break;
14560
14561 case NT_poly:
14562 switch (size)
14563 {
14564 case 8: return N_P8;
14565 case 16: return N_P16;
14566 case 64: return N_P64;
14567 default: ;
14568 }
14569 break;
14570
14571 case NT_signed:
14572 switch (size)
14573 {
14574 case 8: return N_S8;
14575 case 16: return N_S16;
14576 case 32: return N_S32;
14577 case 64: return N_S64;
14578 default: ;
14579 }
14580 break;
14581
14582 case NT_unsigned:
14583 switch (size)
14584 {
14585 case 8: return N_U8;
14586 case 16: return N_U16;
14587 case 32: return N_U32;
14588 case 64: return N_U64;
14589 default: ;
14590 }
14591 break;
14592
14593 default: ;
14594 }
14595
14596 return N_UTYP;
14597 }
14598
14599 /* Convert compact Neon bitmask type representation to a type and size. Only
14600 handles the case where a single bit is set in the mask. */
14601
14602 static int
14603 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
14604 enum neon_type_mask mask)
14605 {
14606 if ((mask & N_EQK) != 0)
14607 return FAIL;
14608
14609 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
14610 *size = 8;
14611 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
14612 *size = 16;
14613 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
14614 *size = 32;
14615 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
14616 *size = 64;
14617 else
14618 return FAIL;
14619
14620 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
14621 *type = NT_signed;
14622 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
14623 *type = NT_unsigned;
14624 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
14625 *type = NT_integer;
14626 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
14627 *type = NT_untyped;
14628 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
14629 *type = NT_poly;
14630 else if ((mask & (N_F_ALL)) != 0)
14631 *type = NT_float;
14632 else
14633 return FAIL;
14634
14635 return SUCCESS;
14636 }
14637
14638 /* Modify a bitmask of allowed types. This is only needed for type
14639 relaxation. */
14640
14641 static unsigned
14642 modify_types_allowed (unsigned allowed, unsigned mods)
14643 {
14644 unsigned size;
14645 enum neon_el_type type;
14646 unsigned destmask;
14647 int i;
14648
14649 destmask = 0;
14650
14651 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14652 {
14653 if (el_type_of_type_chk (&type, &size,
14654 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14655 {
14656 neon_modify_type_size (mods, &type, &size);
14657 destmask |= type_chk_of_el_type (type, size);
14658 }
14659 }
14660
14661 return destmask;
14662 }
14663
14664 /* Check type and return type classification.
14665 The manual states (paraphrase): If one datatype is given, it indicates the
14666 type given in:
14667 - the second operand, if there is one
14668 - the operand, if there is no second operand
14669 - the result, if there are no operands.
14670 This isn't quite good enough though, so we use a concept of a "key" datatype
14671 which is set on a per-instruction basis, which is the one which matters when
14672 only one data type is written.
14673 Note: this function has side-effects (e.g. filling in missing operands). All
14674 Neon instructions should call it before performing bit encoding. */
14675
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers test for NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the instruction performs no type checking.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Type specifiers may follow the mnemonic or the operands, but not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key operand's type and size; pass 1 validates
     every operand against the (possibly modified) key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On the second pass, N_EQK operands are checked against the key's
	     allowed set, adjusted by any modifier bits (N_HLF etc.).  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  /* Non-key, non-EQK operands must simply fall inside their
		     own allowed set.  */
		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* EQK operands must agree with the key after applying any
		     modifier bits carried alongside N_EQK.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14873
14874 /* Neon-style VFP instruction forwarding. */
14875
14876 /* Thumb VFP instructions have 0xE in the condition field. */
14877
14878 static void
14879 do_vfp_cond_or_thumb (void)
14880 {
14881 inst.is_neon = 1;
14882
14883 if (thumb_mode)
14884 inst.instruction |= 0xe0000000;
14885 else
14886 inst.instruction |= inst.cond << 28;
14887 }
14888
14889 /* Look up and encode a simple mnemonic, for use as a helper function for the
14890 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14891 etc. It is assumed that operand parsing has already been done, and that the
14892 operands are in the form expected by the given opcode (this isn't necessarily
14893 the same as the form in which they were parsed, hence some massaging must
14894 take place before this function is called).
14895 Checks current arch version against that in the looked-up opcode. */
14896
14897 static void
14898 do_vfp_nsyn_opcode (const char *opname)
14899 {
14900 const struct asm_opcode *opcode;
14901
14902 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14903
14904 if (!opcode)
14905 abort ();
14906
14907 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14908 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14909 _(BAD_FPU));
14910
14911 inst.is_neon = 1;
14912
14913 if (thumb_mode)
14914 {
14915 inst.instruction = opcode->tvalue;
14916 opcode->tencode ();
14917 }
14918 else
14919 {
14920 inst.instruction = (inst.cond << 28) | opcode->avalue;
14921 opcode->aencode ();
14922 }
14923 }
14924
14925 static void
14926 do_vfp_nsyn_add_sub (enum neon_shape rs)
14927 {
14928 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14929
14930 if (rs == NS_FFF || rs == NS_HHH)
14931 {
14932 if (is_add)
14933 do_vfp_nsyn_opcode ("fadds");
14934 else
14935 do_vfp_nsyn_opcode ("fsubs");
14936
14937 /* ARMv8.2 fp16 instruction. */
14938 if (rs == NS_HHH)
14939 do_scalar_fp16_v82_encode ();
14940 }
14941 else
14942 {
14943 if (is_add)
14944 do_vfp_nsyn_opcode ("faddd");
14945 else
14946 do_vfp_nsyn_opcode ("fsubd");
14947 }
14948 }
14949
14950 /* Check operand types to see if this is a VFP instruction, and if so call
14951 PFN (). */
14952
14953 static int
14954 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14955 {
14956 enum neon_shape rs;
14957 struct neon_type_el et;
14958
14959 switch (args)
14960 {
14961 case 2:
14962 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14963 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14964 break;
14965
14966 case 3:
14967 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14968 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14969 N_F_ALL | N_KEY | N_VFP);
14970 break;
14971
14972 default:
14973 abort ();
14974 }
14975
14976 if (et.type != NT_invtype)
14977 {
14978 pfn (rs);
14979 return SUCCESS;
14980 }
14981
14982 inst.error = NULL;
14983 return FAIL;
14984 }
14985
14986 static void
14987 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14988 {
14989 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14990
14991 if (rs == NS_FFF || rs == NS_HHH)
14992 {
14993 if (is_mla)
14994 do_vfp_nsyn_opcode ("fmacs");
14995 else
14996 do_vfp_nsyn_opcode ("fnmacs");
14997
14998 /* ARMv8.2 fp16 instruction. */
14999 if (rs == NS_HHH)
15000 do_scalar_fp16_v82_encode ();
15001 }
15002 else
15003 {
15004 if (is_mla)
15005 do_vfp_nsyn_opcode ("fmacd");
15006 else
15007 do_vfp_nsyn_opcode ("fnmacd");
15008 }
15009 }
15010
15011 static void
15012 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15013 {
15014 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15015
15016 if (rs == NS_FFF || rs == NS_HHH)
15017 {
15018 if (is_fma)
15019 do_vfp_nsyn_opcode ("ffmas");
15020 else
15021 do_vfp_nsyn_opcode ("ffnmas");
15022
15023 /* ARMv8.2 fp16 instruction. */
15024 if (rs == NS_HHH)
15025 do_scalar_fp16_v82_encode ();
15026 }
15027 else
15028 {
15029 if (is_fma)
15030 do_vfp_nsyn_opcode ("ffmad");
15031 else
15032 do_vfp_nsyn_opcode ("ffnmad");
15033 }
15034 }
15035
15036 static void
15037 do_vfp_nsyn_mul (enum neon_shape rs)
15038 {
15039 if (rs == NS_FFF || rs == NS_HHH)
15040 {
15041 do_vfp_nsyn_opcode ("fmuls");
15042
15043 /* ARMv8.2 fp16 instruction. */
15044 if (rs == NS_HHH)
15045 do_scalar_fp16_v82_encode ();
15046 }
15047 else
15048 do_vfp_nsyn_opcode ("fmuld");
15049 }
15050
15051 static void
15052 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15053 {
15054 int is_neg = (inst.instruction & 0x80) != 0;
15055 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15056
15057 if (rs == NS_FF || rs == NS_HH)
15058 {
15059 if (is_neg)
15060 do_vfp_nsyn_opcode ("fnegs");
15061 else
15062 do_vfp_nsyn_opcode ("fabss");
15063
15064 /* ARMv8.2 fp16 instruction. */
15065 if (rs == NS_HH)
15066 do_scalar_fp16_v82_encode ();
15067 }
15068 else
15069 {
15070 if (is_neg)
15071 do_vfp_nsyn_opcode ("fnegd");
15072 else
15073 do_vfp_nsyn_opcode ("fabsd");
15074 }
15075 }
15076
15077 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15078 insns belong to Neon, and are handled elsewhere. */
15079
15080 static void
15081 do_vfp_nsyn_ldm_stm (int is_dbmode)
15082 {
15083 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15084 if (is_ldm)
15085 {
15086 if (is_dbmode)
15087 do_vfp_nsyn_opcode ("fldmdbs");
15088 else
15089 do_vfp_nsyn_opcode ("fldmias");
15090 }
15091 else
15092 {
15093 if (is_dbmode)
15094 do_vfp_nsyn_opcode ("fstmdbs");
15095 else
15096 do_vfp_nsyn_opcode ("fstmias");
15097 }
15098 }
15099
15100 static void
15101 do_vfp_nsyn_sqrt (void)
15102 {
15103 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15104 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15105
15106 if (rs == NS_FF || rs == NS_HH)
15107 {
15108 do_vfp_nsyn_opcode ("fsqrts");
15109
15110 /* ARMv8.2 fp16 instruction. */
15111 if (rs == NS_HH)
15112 do_scalar_fp16_v82_encode ();
15113 }
15114 else
15115 do_vfp_nsyn_opcode ("fsqrtd");
15116 }
15117
15118 static void
15119 do_vfp_nsyn_div (void)
15120 {
15121 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15122 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15123 N_F_ALL | N_KEY | N_VFP);
15124
15125 if (rs == NS_FFF || rs == NS_HHH)
15126 {
15127 do_vfp_nsyn_opcode ("fdivs");
15128
15129 /* ARMv8.2 fp16 instruction. */
15130 if (rs == NS_HHH)
15131 do_scalar_fp16_v82_encode ();
15132 }
15133 else
15134 do_vfp_nsyn_opcode ("fdivd");
15135 }
15136
15137 static void
15138 do_vfp_nsyn_nmul (void)
15139 {
15140 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15141 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15142 N_F_ALL | N_KEY | N_VFP);
15143
15144 if (rs == NS_FFF || rs == NS_HHH)
15145 {
15146 NEON_ENCODE (SINGLE, inst);
15147 do_vfp_sp_dyadic ();
15148
15149 /* ARMv8.2 fp16 instruction. */
15150 if (rs == NS_HHH)
15151 do_scalar_fp16_v82_encode ();
15152 }
15153 else
15154 {
15155 NEON_ENCODE (DOUBLE, inst);
15156 do_vfp_dp_rd_rn_rm ();
15157 }
15158 do_vfp_cond_or_thumb ();
15159
15160 }
15161
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: vcmp{e} Sd/Dd, Sm/Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against immediate zero: rewrite the mnemonic token to the
	 corresponding vcmpz/vcmpez form before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
15216
15217 static void
15218 nsyn_insert_sp (void)
15219 {
15220 inst.operands[1] = inst.operands[0];
15221 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
15222 inst.operands[0].reg = REG_SP;
15223 inst.operands[0].isreg = 1;
15224 inst.operands[0].writeback = 1;
15225 inst.operands[0].present = 1;
15226 }
15227
15228 static void
15229 do_vfp_nsyn_push (void)
15230 {
15231 nsyn_insert_sp ();
15232
15233 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15234 _("register list must contain at least 1 and at most 16 "
15235 "registers"));
15236
15237 if (inst.operands[1].issingle)
15238 do_vfp_nsyn_opcode ("fstmdbs");
15239 else
15240 do_vfp_nsyn_opcode ("fstmdbd");
15241 }
15242
15243 static void
15244 do_vfp_nsyn_pop (void)
15245 {
15246 nsyn_insert_sp ();
15247
15248 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15249 _("register list must contain at least 1 and at most 16 "
15250 "registers"));
15251
15252 if (inst.operands[1].issingle)
15253 do_vfp_nsyn_opcode ("fldmias");
15254 else
15255 do_vfp_nsyn_opcode ("fldmiad");
15256 }
15257
15258 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15259 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15260
15261 static void
15262 neon_dp_fixup (struct arm_it* insn)
15263 {
15264 unsigned int i = insn->instruction;
15265 insn->is_neon = 1;
15266
15267 if (thumb_mode)
15268 {
15269 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15270 if (i & (1 << 24))
15271 i |= 1 << 28;
15272
15273 i &= ~(1 << 24);
15274
15275 i |= 0xef000000;
15276 }
15277 else
15278 i |= 0xf2000000;
15279
15280 insn->instruction = i;
15281 }
15282
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* Mirror ffs () semantics: LOWEST is the 1-based index of the least
     significant set bit, or 0 when X is zero; subtracting 4 maps the
     power-of-two sizes 8, 16, 32, 64 to 0, 1, 2, 3.  */
  unsigned lowest = 0;
  unsigned i;

  for (i = 0; i < CHAR_BIT * sizeof x; i++)
    if (x & (1u << i))
      {
	lowest = i + 1;
	break;
      }

  return lowest - 4;
}
15291
/* Low 4 bits of a vector register number: its position within a bank.  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a vector register number: the D/N/M high-register bit.  */
#define HI1(R) (((R) >> 4) & 1)
15294
static void
mve_encode_qqr (int size, int fp)
{
  /* MVE vector-with-GPR-scalar form; SP and PC as the scalar register draw
     a diagnostic but still assemble.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* NOTE(review): the values compared against below (0xd00, 0x200d00, 0x800,
     0x1000800) are presumably the generic Neon opcode values placed in
     inst.instruction by the opcode table, replaced here with the MVE
     QQR encodings — confirm against the insns table.  */
  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd in bits 12-15 with its high bit at 22, Qn in bits 16-19 with its high
     bit at 7, and the scalar GPR Rm in bits 0-3.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
15333
15334 static void
15335 mve_encode_rqq (unsigned bit28, unsigned size)
15336 {
15337 inst.instruction |= bit28 << 28;
15338 inst.instruction |= neon_logbits (size) << 20;
15339 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15340 inst.instruction |= inst.operands[0].reg << 12;
15341 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15342 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15343 inst.instruction |= LOW4 (inst.operands[2].reg);
15344 inst.is_neon = 1;
15345 }
15346
15347 static void
15348 mve_encode_qqq (int ubit, int size)
15349 {
15350
15351 inst.instruction |= (ubit != 0) << 28;
15352 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15353 inst.instruction |= neon_logbits (size) << 20;
15354 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15355 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15356 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15357 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15358 inst.instruction |= LOW4 (inst.operands[2].reg);
15359
15360 inst.is_neon = 1;
15361 }
15362
15363
15364 /* Encode insns with bit pattern:
15365
15366 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15367 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15368
15369 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15370 different meaning for some instruction. */
15371
15372 static void
15373 neon_three_same (int isquad, int ubit, int size)
15374 {
15375 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15376 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15377 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15378 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15379 inst.instruction |= LOW4 (inst.operands[2].reg);
15380 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15381 inst.instruction |= (isquad != 0) << 6;
15382 inst.instruction |= (ubit != 0) << 24;
15383 if (size != -1)
15384 inst.instruction |= neon_logbits (size) << 20;
15385
15386 neon_dp_fixup (&inst);
15387 }
15388
15389 /* Encode instructions of the form:
15390
15391 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15392 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15393
15394 Don't write size if SIZE == -1. */
15395
15396 static void
15397 neon_two_same (int qbit, int ubit, int size)
15398 {
15399 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15400 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15401 inst.instruction |= LOW4 (inst.operands[1].reg);
15402 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15403 inst.instruction |= (qbit != 0) << 6;
15404 inst.instruction |= (ubit != 0) << 24;
15405
15406 if (size != -1)
15407 inst.instruction |= neon_logbits (size) << 18;
15408
15409 neon_dp_fixup (&inst);
15410 }
15411
15412 /* Neon instruction encoders, in approximate order of appearance. */
15413
15414 static void
15415 do_neon_dyadic_i_su (void)
15416 {
15417 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15418 struct neon_type_el et = neon_check_type (3, rs,
15419 N_EQK, N_EQK, N_SU_32 | N_KEY);
15420 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15421 }
15422
15423 static void
15424 do_neon_dyadic_i64_su (void)
15425 {
15426 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15427 struct neon_type_el et = neon_check_type (3, rs,
15428 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15429 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15430 }
15431
15432 static void
15433 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
15434 unsigned immbits)
15435 {
15436 unsigned size = et.size >> 3;
15437 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15438 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15439 inst.instruction |= LOW4 (inst.operands[1].reg);
15440 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15441 inst.instruction |= (isquad != 0) << 6;
15442 inst.instruction |= immbits << 16;
15443 inst.instruction |= (size >> 3) << 7;
15444 inst.instruction |= (size & 0x7) << 19;
15445 if (write_ubit)
15446 inst.instruction |= (uval != 0) << 24;
15447
15448 neon_dp_fixup (&inst);
15449 }
15450
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: vshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15486
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: vqshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      /* The shift amount must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15516
15517 static void
15518 do_neon_rshl (void)
15519 {
15520 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15521 struct neon_type_el et = neon_check_type (3, rs,
15522 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15523 unsigned int tmp;
15524
15525 tmp = inst.operands[2].reg;
15526 inst.operands[2].reg = inst.operands[1].reg;
15527 inst.operands[1].reg = tmp;
15528 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15529 }
15530
15531 static int
15532 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
15533 {
15534 /* Handle .I8 pseudo-instructions. */
15535 if (size == 8)
15536 {
15537 /* Unfortunately, this will make everything apart from zero out-of-range.
15538 FIXME is this the intended semantics? There doesn't seem much point in
15539 accepting .I8 if so. */
15540 immediate |= immediate << 8;
15541 size = 16;
15542 }
15543
15544 if (size >= 32)
15545 {
15546 if (immediate == (immediate & 0x000000ff))
15547 {
15548 *immbits = immediate;
15549 return 0x1;
15550 }
15551 else if (immediate == (immediate & 0x0000ff00))
15552 {
15553 *immbits = immediate >> 8;
15554 return 0x3;
15555 }
15556 else if (immediate == (immediate & 0x00ff0000))
15557 {
15558 *immbits = immediate >> 16;
15559 return 0x5;
15560 }
15561 else if (immediate == (immediate & 0xff000000))
15562 {
15563 *immbits = immediate >> 24;
15564 return 0x7;
15565 }
15566 if ((immediate & 0xffff) != (immediate >> 16))
15567 goto bad_immediate;
15568 immediate &= 0xffff;
15569 }
15570
15571 if (immediate == (immediate & 0x000000ff))
15572 {
15573 *immbits = immediate;
15574 return 0x9;
15575 }
15576 else if (immediate == (immediate & 0x0000ff00))
15577 {
15578 *immbits = immediate >> 8;
15579 return 0xb;
15580 }
15581
15582 bad_immediate:
15583 first_error (_("immediate value out of range"));
15584 return FAIL;
15585 }
15586
/* Encode the bitwise-logic family (VBIC/VORR and the VAND/VORN immediate
   pseudo-forms, plus the untyped three-register forms).  The immediate
   forms are encoded via a cmode; VAND/VORN are implemented as VBIC/VORR
   of the bitwise-inverted immediate.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form: the operation is purely bitwise, so the
         element type is ignored.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either "Vop Dd, Dn, #imm" (three-ops) or the
         two-operand "Vop Dd, #imm" shorthand.  */
      const int three_ops_form = (inst.operands[2].present
                                  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
                            ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
                            : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      /* The immediate forms are destructive: destination and first source
         must name the same register.  */
      if (three_ops_form)
        constraint (inst.operands[0].reg != inst.operands[1].reg,
                    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern.  */
          if (immbits != (inst.operands[immoperand].regisimm ?
                          inst.operands[immoperand].reg : 0))
            {
              /* Set immbits to an invalid constant.  */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      /* neon_cmode_for_logic_imm already reported the error.  */
      if (cmode == FAIL)
        return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15672
/* Encode the three-register bitfield/bit-select operations.  These are
   purely bitwise, so the element type is ignored and no size or U bit
   is encoded (size passed as -1).  */
static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
15680
/* Common encoder for three-operand (dyadic) operations whose U bit depends
   on the element type.  UBIT_MEANING is the neon_el_type that causes the
   U bit to be set in the integer case; TYPES is the set of permitted
   element types; DESTBITS adds extra type requirements on the destination.
   Also handles the MVE vector+scalar (QQR) shape via mve_encode_qqr.  */
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
                  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
                                            types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      if (rs == NS_QQR)
        mve_encode_qqr (et.size, 1);
      else
        /* Only half-precision (size 16) is encoded in the size field;
           single precision is implied by -1.  */
        neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
        mve_encode_qqr (et.size, 0);
      else
        neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
15705
15706
/* Dyadic operation on signed/unsigned/float 32-bit types, D registers
   only.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15714
/* Dyadic operation on integer/float 32-bit types, D registers only.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15722
/* Bits for the CHECK argument of vfp_or_neon_is_neon, selecting which
   checks/fix-ups are performed.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,     /* Check/fix up the condition code field.  */
  NEON_CHECK_ARCH = 2,   /* Require base Neon (fpu_neon_ext_v1).  */
  NEON_CHECK_ARCH8 = 4   /* Require ARMv8 Neon (fpu_neon_ext_armv8).  */
};
15729
15730 /* Call this function if an instruction which may have belonged to the VFP or
15731 Neon instruction sets, but turned out to be a Neon instruction (due to the
15732 operand types involved, etc.). We have to check and/or fix-up a couple of
15733 things:
15734
15735 - Make sure the user hasn't attempted to make a Neon instruction
15736 conditional.
15737 - Alter the value in the condition code field if necessary.
15738 - Make sure that the arch supports Neon instructions.
15739
15740 Which of these operations take place depends on bits from enum
15741 vfp_or_neon_is_neon_bits.
15742
15743 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15744 current instruction's condition is COND_ALWAYS, the condition field is
15745 changed to inst.uncond_value. This is necessary because instructions shared
15746 between VFP and Neon may be conditional for the VFP variants only, and the
15747 unconditional Neon version must have, e.g., 0xF in the condition field. */
15748
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  In ARM mode a
     Neon instruction may not be conditional, and the "always" condition
     may need rewriting to the unconditional (0xF) encoding.  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
        {
          first_error (_(BAD_COND));
          return FAIL;
        }
      /* Patch in the unconditional-encoding condition field if one was
         recorded for this instruction.  */
      if (inst.uncond_value != -1)
        inst.instruction |= inst.uncond_value << 28;
    }


  /* Verify (and record use of) the required architecture features.  */
  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
      || ((check & NEON_CHECK_ARCH8)
          && !mark_feature_used (&fpu_neon_ext_armv8)))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
15775
/* Decide whether the current instruction may be used predicated (MVE VPT
   block) or conditional, and set inst.pred_insn_type accordingly.  FP is
   non-zero when the MVE floating-point extension is required; CHECK is a
   mask of NEON_CHECK_* bits forwarded to vfp_or_neon_is_neon.  Returns 0
   on success, non-zero (with an error recorded) when the instruction is
   not available.  */
static int
check_simd_pred_availability (int fp, unsigned check)
{
  if (inst.cond > COND_ALWAYS)
    {
      /* A VPT predication suffix: only valid with MVE.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
        {
          inst.error = BAD_FPU;
          return 1;
        }
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  else if (inst.cond < COND_ALWAYS)
    {
      /* A scalar condition code: MVE instructions stay outside predication;
         otherwise fall back to the Neon legality checks.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
        inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
        return 2;
    }
  else
    {
      /* Unconditional: require either the appropriate MVE extension or a
         valid Neon configuration.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
          && vfp_or_neon_is_neon (check) == FAIL)
        return 3;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
        inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return 0;
}
15806
/* Encode the MVE VLDR/VSTR form with a vector base register:
   [Qn, #+/-imm]{!}.  SIZE is the access size in bits (32 or 64 here),
   ELSIZE the element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
              _("destination register and offset register may not be the"
                " same"));

  /* Split the signed offset into a magnitude and an add/subtract bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* The offset is encoded as a 7-bit value scaled by the access size.  */
  constraint ((imm % (size / 8) != 0)
              || imm > (0x7f << neon_logbits (size)),
              (size == 32) ? _("immediate must be a multiple of 4 in the"
                               " range of +/-[0,508]")
                           : _("immediate must be a multiple of 8 in the"
                               " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
15843
/* Encode the MVE VLDR/VSTR form with a register base and vector offset:
   [Rn, Qm {, UXTW #shift}].  SIZE is the access size in bits, ELSIZE the
   element size from the type suffix, LOAD non-zero for VLDR.  The Q
   register and optional shift are packed into operands[1].imm (shift in
   bit 5).  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* Offset-shift amount lives above the register bits in operands[1].imm.  */
  unsigned os = inst.operands[1].imm >> 5;
  constraint (os != 0 && size == 8,
              _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
              _("shift immediate must be 1, 2 or 3 for half-word, word"
                " or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Check the element type is compatible with the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
              BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
                  _("destination register and offset register may not be"
                    " the same"));
      /* A widening load (elsize < size) needs a signedness; a same-size
         load must be unsigned.  */
      constraint (size == elsize && inst.vectype.el[0].type != NT_unsigned,
                  BAD_EL_TYPE);
      constraint (inst.vectype.el[0].type != NT_unsigned
                  && inst.vectype.el[0].type != NT_signed, BAD_EL_TYPE);
      inst.instruction |= (inst.vectype.el[0].type == NT_unsigned) << 28;
    }
  else
    {
      /* Stores take an untyped suffix only.  */
      constraint (inst.vectype.el[0].type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  inst.instruction |= !!os;
}
15898
/* Encode the MVE VLDR/VSTR form with a register base and immediate offset:
   [Rn, #+/-imm]{!} or [Rn], #+/-imm.  SIZE is the access size in bits,
   ELSIZE the element size from the type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Check the element type is compatible with the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* A widening load needs an explicit signedness.  */
      constraint (elsize != size && type != NT_unsigned
                  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* A narrowing store takes an untyped suffix only.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the signed offset into a magnitude and an add/subtract bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* The offset is a 7-bit value scaled by the access size.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
        {
        case 8:
          constraint (1, _("immediate must be in the range of +/-[0,127]"));
          break;
        case 16:
          constraint (1, _("immediate must be a multiple of 2 in the"
                           " range of +/-[0,254]"));
          break;
        case 32:
          constraint (1, _("immediate must be a multiple of 4 in the"
                           " range of +/-[0,508]"));
          break;
        }
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted to low registers.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
                  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Same-size form: PC base is unpredictable, SP with writeback
         deprecated — warn but accept.  */
      if (inst.operands[1].reg == REG_PC)
        as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
        as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low 7 bits before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
15980
/* Top-level encoder for the MVE VLDR[BHWD]/VSTR[BHWD] instructions.
   Works out the access size from the mnemonic and dispatches on the
   addressing form to one of the do_mve_vstr_vldr_* helpers.  */
static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Derive the memory access size (and load/store direction) from the
     mnemonic; the vldr* cases fall through to the matching vstr* size.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
        {
          /* We are dealing with [R, Q, {UXTW #os}] cases.  */
          do_mve_vstr_vldr_RQ (size, elsize, load);
        }
      else if (!inst.operands[1].immisreg)
        {
          /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
          do_mve_vstr_vldr_RI (size, elsize, load);
        }
      else
        constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
16047
/* Encode the MVE VST/VLD (structured, multi-register) family.  Only the
   plain [Rn]{!} addressing form is accepted: no offset, no register
   offset.  */
static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
              || inst.relocs[0].exp.X_add_number != 0
              || inst.operands[1].immisreg != 0,
              BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
16085
/* Dyadic operation on signed/unsigned/float 32-bit types, with the
   MVE/Neon availability and predication checks applied first.  */
static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
                                            N_SUF_32 | N_KEY);

  if (check_simd_pred_availability (et.type == NT_float,
                                    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
16099
/* Encode VADD/VSUB, trying the VFP form first and otherwise choosing
   between the MVE and Neon encodings based on shape, element size and
   predication.  */
static void
do_neon_addsub_if_i (void)
{
  /* Prefer the scalar VFP encoding when it applies.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
                                            N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* MVE has no 64-bit elements for the vector+scalar form.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (check_simd_pred_availability (et.type == NT_float,
                                        NEON_CHECK_ARCH | NEON_CHECK_CC))
        return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
          && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
    }

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
16134
16135 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
16136 result to be:
16137 V<op> A,B (A is operand 0, B is operand 2)
16138 to mean:
16139 V<op> A,B,A
16140 not:
16141 V<op> A,B,B
16142 so handle that case specially. */
16143
16144 static void
16145 neon_exchange_operands (void)
16146 {
16147 if (inst.operands[1].present)
16148 {
16149 void *scratch = xmalloc (sizeof (inst.operands[0]));
16150
16151 /* Swap operands[1] and operands[2]. */
16152 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
16153 inst.operands[1] = inst.operands[2];
16154 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
16155 free (scratch);
16156 }
16157 else
16158 {
16159 inst.operands[1] = inst.operands[2];
16160 inst.operands[2] = inst.operands[0];
16161 }
16162 }
16163
/* Encode comparison instructions, either register-register (REGTYPES) or
   register against immediate zero (IMMTYPES).  INVERT swaps the source
   operands, turning e.g. a "greater than" into the encoded "less than"
   form.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
16191
/* Comparison, operands in source order.  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
16197
/* Comparison with the source operands swapped.  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
16203
/* Equality comparison (symmetric, so no operand swap needed).  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
16209
16210 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
16211 scalars, which are encoded in 5 bits, M : Rm.
16212 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
16213 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
16214 index in M.
16215
16216 Dot Product instructions are similar to multiply instructions except elsize
16217 should always be 32.
16218
16219 This function translates SCALAR, which is GAS's internal encoding of indexed
16220 scalar register, to raw encoding. There is also register and index range
16221 check based on ELSIZE. */
16222
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other element size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16248
16249 /* Encode multiply / multiply-accumulate scalar instructions. */
16250
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate the indexed scalar operand to its raw M:Rm encoding (also
     range-checks it for the element size).  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
16273
/* Encode VMLA/VMLS, trying the VFP form first, then choosing between the
   scalar-indexed and three-register Neon encodings.  */
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* Multiply-accumulate by indexed scalar.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
16298
/* Encode VFMA/VFMS, preferring the VFP form when it applies.  */
static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
16310
/* Encode VTST.  The operation is bitwise, so signedness is irrelevant and
   the U bit stays clear.  */
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
16319
16320 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16321 same types as the MAC equivalents. The polynomial type for this instruction
16322 is encoded the same as the integer type. */
16323
static void
do_neon_mul (void)
{
  /* Prefer the scalar VFP multiply when it applies.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* The scalar-indexed form shares its encoder with VMLA/VMLS.  */
    do_neon_mac_maybe_scalar ();
  else
    /* P8 is allowed here (see comment above); polynomial encodes like
       integer.  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
16338
/* Encode VQDMULH/VQRDMULH (saturating doubling multiply high), in either
   scalar-indexed or three-register form.  */
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
16360
/* Encode MVE VMULLT (and dispatch to the Neon VMUL encoder when the
   mnemonic/shape actually denotes a Neon multiply rather than the MVE
   widening multiply).  */
static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
                                          NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  /* Without MVE, an unpredicated "vmullt" may really be Neon "vmul" with
     a "t" condition suffix; detect that case and hand it over.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
        {

          struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
                                                    N_SUF_32 | N_F64 | N_P8
                                                    | N_P16 | N_I_MVE | N_KEY);
          if (((et.type == NT_poly) && et.size == 8
               && ARM_CPU_IS_ANY (cpu_variant))
              || (et.type == NT_integer) || (et.type == NT_float))
            goto neon_vmul;
        }
      else
        goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
                                            N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
          || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

  neon_vmul:
  /* Re-dispatch as Neon VMUL with a "t"-condition (0xb) suffix.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
16415
/* Encode MVE VABAV (absolute-difference accumulate across vector, result
   in a general-purpose register).  */
static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
                                            | N_S16 | N_S32 | N_U8 | N_U16
                                            | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
16438
/* Encode the MVE VMLADAV/VMLSDAV family (dot-product style accumulate to
   a general-purpose register).  */
static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
                                            N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* The exchanging (x) variants and all VMLSDAV forms only accept signed
     types.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
          || inst.instruction == M_MNEM_vmladavax
          || inst.instruction == M_MNEM_vmlsdav
          || inst.instruction == M_MNEM_vmlsdava
          || inst.instruction == M_MNEM_vmlsdavx
          || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
              _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit-element flag sits in a different bit for the VMLSDAV
     variants than for VMLADAV.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
16474
/* Encode VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD), in either scalar-indexed or
   three-register form.  */
static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
16506
/* Encode absolute floating-point comparisons (VACGE/VACGT family).  */
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
                                            N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
16516
/* As do_neon_fcmp_absolute, but with the source operands swapped (for the
   "less than" spellings).  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16523
/* Encode the floating-point step operations (VRECPS/VRSQRTS family).  */
static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
                                            N_F_16_32 | N_KEY);
  /* Only half precision is encoded in the size field.  */
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
16532
/* Encode VABS/VNEG, trying the VFP form first, then applying the MVE/Neon
   availability checks before emitting the two-register form.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (check_simd_pred_availability (et.type == NT_float,
                                    NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
16559
/* Encode VSLI (shift left and insert).  The shift must be strictly less
   than the element width.  */
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
16571
16572 static void
16573 do_neon_sri (void)
16574 {
16575 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16576 struct neon_type_el et = neon_check_type (2, rs,
16577 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16578 int imm = inst.operands[2].imm;
16579 constraint (imm < 1 || (unsigned)imm > et.size,
16580 _("immediate out of range for insert"));
16581 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
16582 }
16583
16584 static void
16585 do_neon_qshlu_imm (void)
16586 {
16587 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16588 struct neon_type_el et = neon_check_type (2, rs,
16589 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
16590 int imm = inst.operands[2].imm;
16591 constraint (imm < 0 || (unsigned)imm >= et.size,
16592 _("immediate out of range for shift"));
16593 /* Only encodes the 'U present' variant of the instruction.
16594 In this case, signed types have OP (bit 8) set to 0.
16595 Unsigned types have OP set to 1. */
16596 inst.instruction |= (et.type == NT_unsigned) << 8;
16597 /* The rest of the bits are the same as other immediate shifts. */
16598 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
16599 }
16600
16601 static void
16602 do_neon_qmovn (void)
16603 {
16604 struct neon_type_el et = neon_check_type (2, NS_DQ,
16605 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
16606 /* Saturating move where operands can be signed or unsigned, and the
16607 destination has the same signedness. */
16608 NEON_ENCODE (INTEGER, inst);
16609 if (et.type == NT_unsigned)
16610 inst.instruction |= 0xc0;
16611 else
16612 inst.instruction |= 0x80;
16613 neon_two_same (0, 1, et.size / 2);
16614 }
16615
16616 static void
16617 do_neon_qmovun (void)
16618 {
16619 struct neon_type_el et = neon_check_type (2, NS_DQ,
16620 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
16621 /* Saturating move with unsigned results. Operands must be signed. */
16622 NEON_ENCODE (INTEGER, inst);
16623 neon_two_same (0, 1, et.size / 2);
16624 }
16625
/* Encode a saturating shift-right-and-narrow (immediate form).  A zero
   shift count is rewritten as the equivalent VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The encoded shift amount counts down from the (halved) element size.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
16652
/* Encode a saturating shift-right-and-narrow with unsigned result
   (signed source).  A zero shift count is rewritten as VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
16682
16683 static void
16684 do_neon_movn (void)
16685 {
16686 struct neon_type_el et = neon_check_type (2, NS_DQ,
16687 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
16688 NEON_ENCODE (INTEGER, inst);
16689 neon_two_same (0, 1, et.size / 2);
16690 }
16691
/* Encode an integer shift-right-and-narrow (immediate form).  A zero
   shift count is rewritten as the equivalent VMOVN.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
16716
/* Encode VSHLL (shift left long).  The shift-by-element-size form has a
   distinct encoding from the general immediate form.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
16746
16747 /* Check the various types for the VCVT instruction, and return which version
16748 the current instruction is. */
16749
/* X-macro table of all supported conversion flavours.  Each CVT_VAR row
   is: (name, destination type, source type, register-class bits, and the
   legacy VFP mnemonics for the bitshift, plain and round-to-zero forms —
   NULL where no such VFP mnemonic exists).  `whole_reg' and `key' are
   expected to be in scope at each expansion site.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand the table into an enumerator per flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by the VFP (rather than Neon) encoders.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
16798
/* Return the conversion flavour (see CVT_FLAVOUR_VAR) matching the parsed
   operand types for shape RS, or neon_cvt_flavour_invalid if none does.
   Errors left behind by failed type probes are discarded.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Probe each flavour in table order; the first whose type check
     succeeds wins.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
16824
/* Rounding-mode variants of the conversion instructions; each enumerator
   corresponds to the mnemonic suffix letter (VCVTA/VCVTN/VCVTP/VCVTM,
   plus the z/x/r variants handled by the VFP-style encoders).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
16835
16836 /* Neon-syntax VFP conversions. */
16837
/* Encode a VFP conversion written in Neon syntax by dispatching to the
   legacy VFP mnemonic picked from the flavour table: the BSN column for
   immediate/bitshift forms, the CN column otherwise.  Entries with no
   such mnemonic are NULL, in which case nothing is emitted here.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the (duplicate) destination so the remaining operands
	     match the two-operand VFP form.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
16889
/* Encode the round-to-zero variant of a Neon-syntax VFP conversion,
   using the legacy mnemonic from the ZN column of the flavour table.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  /* Not every flavour has a round-to-zero mnemonic; do nothing when the
     table entry is NULL.  */
  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
16906
/* Encode an FP v8 scalar conversion with explicit rounding mode
   (the VCVT{A,N,P,M} family).  FLAVOUR fixes the operand width and
   signedness; MODE selects the rounding-mode field.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* sz (bit 8) selects a double-precision source, op (bit 7) a signed
     result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Fixed top nibble of the encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
16982
/* Central VCVT dispatcher.  Selects between the VFP scalar encoders, the
   Advanced SIMD fixed-point (immediate), integer, explicit-rounding-mode
   and half-precision widening/narrowing encodings, based on the parsed
   operand shape and the conversion flavour.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Advanced SIMD fixed-point conversion (with #fbits immediate).  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements: immediate is encoded as 32 - #fbits.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision elements: different size field, immediate
	       encoded as 16 - #fbits, and bit 9 must be clear.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Advanced SIMD VCVT{A,N,P,M}: explicit rounding mode in
	     bits 8-9, unconditional encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_pred_insn_type (OUTSIDE_PRED_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Advanced SIMD integer <-> float conversion.  Also reached
	       from the NS_DDI/NS_QQI case when #fbits is zero.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* Base opcodes for the narrowing (DQ) and widening (QD) forms.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
17176
/* VCVTR: conversion using the current rounding mode (mode 'x').  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
17182
/* VCVT: plain conversion, round towards zero (mode 'z').  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
17188
/* VCVTA: conversion with rounding mode 'a'.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
17194
/* VCVTN: conversion with rounding mode 'n'.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
17200
/* VCVTP: conversion with rounding mode 'p'.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
17206
/* VCVTM: conversion with rounding mode 'm'.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
17212
17213 static void
17214 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
17215 {
17216 if (is_double)
17217 mark_feature_used (&fpu_vfp_ext_armv8);
17218
17219 encode_arm_vfp_reg (inst.operands[0].reg,
17220 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
17221 encode_arm_vfp_reg (inst.operands[1].reg,
17222 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
17223 inst.instruction |= to ? 0x10000 : 0;
17224 inst.instruction |= t ? 0x80 : 0;
17225 inst.instruction |= is_double ? 0x100 : 0;
17226 do_vfp_cond_or_thumb ();
17227 }
17228
/* Shared handler for VCVTB/VCVTT (T selects the top-half form).  Probes
   each legal type pairing in turn; errors left in inst.error by failed
   probes are cleared once a pairing matches.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
17270
/* VCVTB: bottom-half half-precision conversion.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
17276
17277
/* VCVTT: top-half half-precision conversion.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
17283
/* Encode the immediate form of VMOV/VMVN.  Picks a cmode/op encoding for
   the (possibly 64-bit) immediate; if the value cannot be encoded
   directly, retries with the bitwise-inverted immediate and the opposite
   mnemonic.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate arrives split across reg (high word) and imm
     (low word).  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Install the (possibly flipped) MVN/MOV bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
17335
/* Encode VMVN: register form as a two-same integer operation, immediate
   form via the shared move-immediate encoder.  */
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
17358
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |

   Shared by the mixed-length (long/wide/narrow) three-register ops.  ET
   supplies the signedness for the U bit; SIZE is the element size placed
   in bits 20-21 (log2-encoded).  */
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
17378
/* Encode the vaddl/vsubl/vabdl family.  For Neon this is an ordinary
   lengthening (Q = D op D) operation; when assembling for MVE, the "l"
   suffix is really an le/lt condition on vadd/vsub/vabd inside an IT
   block, so the instruction is rewritten accordingly.  */
static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
17426
17427 static void
17428 do_neon_abal (void)
17429 {
17430 struct neon_type_el et = neon_check_type (3, NS_QDD,
17431 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
17432 neon_mixed_length (et, et.size);
17433 }
17434
17435 static void
17436 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
17437 {
17438 if (inst.operands[2].isscalar)
17439 {
17440 struct neon_type_el et = neon_check_type (3, NS_QDS,
17441 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
17442 NEON_ENCODE (SCALAR, inst);
17443 neon_mul_mac (et, et.type == NT_unsigned);
17444 }
17445 else
17446 {
17447 struct neon_type_el et = neon_check_type (3, NS_QDD,
17448 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
17449 NEON_ENCODE (INTEGER, inst);
17450 neon_mixed_length (et, et.size);
17451 }
17452 }
17453
/* Long multiply-accumulate where the final operand may be a scalar.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
17459
/* Like neon_scalar_for_mul, generate the Rm encoding from GAS's internal
   SCALAR representation.  QUAD_P is 1 for the Q form, 0 otherwise.
   Reports an error and returns 0 if the scalar is out of range.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned el = NEON_SCALAR_INDEX (scalar);

  /* The Q form allows registers 0-7 with element 0-3; the D form allows
     registers 0-15 with element 0-1.  */
  if (quad_p ? (reg > 7 || el > 3) : (reg > 15 || el > 1))
    {
      first_error (_("scalar out of range for multiply instruction"));
      return 0;
    }

  if (quad_p)
    return ((reg & 0x7)
	    | ((el & 0x1) << 3)
	    | (((el >> 1) & 0x1) << 5));

  return (((reg & 0x1) << 5)
	  | ((reg >> 1) & 0x7)
	  | ((el & 0x1) << 3));
}
17492
/* Encode vfmal (SUBTYPE == 0) or vfmsl (SUBTYPE == 1): FP16 fused
   multiply-accumulate long, in either three-register form or with a
   scalar-indexed final operand.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
     field (bits[21:20]) has different meaning. For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup. Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
17574
/* Encode VFMAL (fused multiply-accumulate long, FP16 -> FP32).  */

static void
do_neon_vfmal (void)
{
  /* 0 selects the accumulate (non-subtract) form of the shared encoder.  */
  do_neon_fmac_maybe_scalar_long (0);
}
17580
/* Encode VFMSL (fused multiply-subtract long, FP16 -> FP32).  */

static void
do_neon_vfmsl (void)
{
  /* 1 selects the subtract form of the shared encoder.  */
  do_neon_fmac_maybe_scalar_long (1);
}
17586
17587 static void
17588 do_neon_dyadic_wide (void)
17589 {
17590 struct neon_type_el et = neon_check_type (3, NS_QQD,
17591 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
17592 neon_mixed_length (et, et.size);
17593 }
17594
17595 static void
17596 do_neon_dyadic_narrow (void)
17597 {
17598 struct neon_type_el et = neon_check_type (3, NS_QDD,
17599 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
17600 /* Operand sign is unimportant, and the U bit is part of the opcode,
17601 so force the operand type to integer. */
17602 et.type = NT_integer;
17603 neon_mixed_length (et, et.size / 2);
17604 }
17605
/* Encode VQDMULL-style saturating long multiply with a scalar operand;
   only signed 16/32-bit element types are accepted.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
17611
/* Encode VMULL.  The scalar-operand form dispatches to the long
   multiply-accumulate encoder; otherwise encode the three-register long
   multiply, handling the polynomial (P8/P64) variants specially.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Recode 64 as 32 so the size field is emitted as 0b10.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
17643
17644 static void
17645 do_neon_ext (void)
17646 {
17647 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17648 struct neon_type_el et = neon_check_type (3, rs,
17649 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
17650 unsigned imm = (inst.operands[3].imm * et.size) / 8;
17651
17652 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
17653 _("shift out of range"));
17654 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17655 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17656 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17657 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17658 inst.instruction |= LOW4 (inst.operands[2].reg);
17659 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17660 inst.instruction |= neon_quad (rs) << 6;
17661 inst.instruction |= imm << 8;
17662
17663 neon_dp_fixup (&inst);
17664 }
17665
/* Encode VREV16/VREV32/VREV64 (reverse elements within fixed-size
   regions of a vector).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* Bits [8:7] of the template opcode distinguish the three variants.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
17682
/* Encode VDUP: either duplicate one scalar lane of a Neon register, or
   broadcast an ARM core register, across all lanes of a vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar-to-vector form (Neon only).  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Lane index and element size share the imm4 field, bits [19:16].  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size is encoded in the b/e bits (22 and 5).  */
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
17733
17734 /* VMOV has particularly many variations. It can be one of:
17735 0. VMOV<c><q> <Qd>, <Qm>
17736 1. VMOV<c><q> <Dd>, <Dm>
17737 (Register operations, which are VORR with Rm = Rn.)
17738 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17739 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17740 (Immediate loads.)
17741 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17742 (ARM register to scalar.)
17743 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17744 (Two ARM registers to vector.)
17745 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17746 (Scalar to ARM register.)
17747 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17748 (Vector to two ARM registers.)
17749 8. VMOV.F32 <Sd>, <Sm>
17750 9. VMOV.F64 <Dd>, <Dm>
17751 (VFP register moves.)
17752 10. VMOV.F32 <Sd>, #imm
17753 11. VMOV.F64 <Dd>, #imm
17754 (VFP float immediate load.)
17755 12. VMOV <Rd>, <Sm>
17756 (VFP single to ARM reg.)
17757 13. VMOV <Sd>, <Rm>
17758 (ARM reg to VFP single.)
17759 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17760 (Two ARM regs to two VFP singles.)
17761 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17762 (Two VFP singles to two ARM regs.)
17763
17764 These cases can be disambiguated using neon_select_shape, except cases 1/9
17765 and 3/11 which depend on the operand type too.
17766
17767 All the encoded bits are hardcoded by this function.
17768
17769 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17770 Cases 5, 7 may be used with VFPv2 and above.
17771
17772 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17773 can specify a type where it doesn't make sense to, and is ignored). */
17774
/* Assemble all forms of VMOV (cases 0-15 in the table above).  The
   operand shape returned by neon_select_shape picks the case; NS_DD/NS_DI
   additionally look at the element type to separate the VFP double forms
   (cases 9/11) from the Neon forms (cases 1/3).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VFP double-precision register copy.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rm = Rn: the source register is encoded twice.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	/* Only 32-bit transfers are valid without Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Sub-word transfers carry the signedness in the U bit (bit 23).  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
18042
18043 static void
18044 do_neon_rshift_round_imm (void)
18045 {
18046 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18047 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
18048 int imm = inst.operands[2].imm;
18049
18050 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
18051 if (imm == 0)
18052 {
18053 inst.operands[2].present = 0;
18054 do_neon_mov ();
18055 return;
18056 }
18057
18058 constraint (imm < 1 || (unsigned)imm > et.size,
18059 _("immediate out of range for shift"));
18060 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
18061 et.size - imm);
18062 }
18063
18064 static void
18065 do_neon_movhf (void)
18066 {
18067 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
18068 constraint (rs != NS_HH, _("invalid suffix"));
18069
18070 if (inst.cond != COND_ALWAYS)
18071 {
18072 if (thumb_mode)
18073 {
18074 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
18075 " the behaviour is UNPREDICTABLE"));
18076 }
18077 else
18078 {
18079 inst.error = BAD_COND;
18080 return;
18081 }
18082 }
18083
18084 do_vfp_sp_monadic ();
18085
18086 inst.is_neon = 1;
18087 inst.instruction |= 0xf0000000;
18088 }
18089
18090 static void
18091 do_neon_movl (void)
18092 {
18093 struct neon_type_el et = neon_check_type (2, NS_QD,
18094 N_EQK | N_DBL, N_SU_32 | N_KEY);
18095 unsigned sizebits = et.size >> 3;
18096 inst.instruction |= sizebits << 19;
18097 neon_two_same (0, et.type == NT_unsigned, -1);
18098 }
18099
18100 static void
18101 do_neon_trn (void)
18102 {
18103 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18104 struct neon_type_el et = neon_check_type (2, rs,
18105 N_EQK, N_8 | N_16 | N_32 | N_KEY);
18106 NEON_ENCODE (INTEGER, inst);
18107 neon_two_same (neon_quad (rs), 1, et.size);
18108 }
18109
18110 static void
18111 do_neon_zip_uzp (void)
18112 {
18113 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18114 struct neon_type_el et = neon_check_type (2, rs,
18115 N_EQK, N_8 | N_16 | N_32 | N_KEY);
18116 if (rs == NS_DD && et.size == 32)
18117 {
18118 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
18119 inst.instruction = N_MNEM_vtrn;
18120 do_neon_trn ();
18121 return;
18122 }
18123 neon_two_same (neon_quad (rs), 1, et.size);
18124 }
18125
18126 static void
18127 do_neon_sat_abs_neg (void)
18128 {
18129 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18130 struct neon_type_el et = neon_check_type (2, rs,
18131 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18132 neon_two_same (neon_quad (rs), 1, et.size);
18133 }
18134
18135 static void
18136 do_neon_pair_long (void)
18137 {
18138 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18139 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
18140 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
18141 inst.instruction |= (et.type == NT_unsigned) << 7;
18142 neon_two_same (neon_quad (rs), 1, et.size);
18143 }
18144
18145 static void
18146 do_neon_recip_est (void)
18147 {
18148 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18149 struct neon_type_el et = neon_check_type (2, rs,
18150 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
18151 inst.instruction |= (et.type == NT_float) << 8;
18152 neon_two_same (neon_quad (rs), 1, et.size);
18153 }
18154
18155 static void
18156 do_neon_cls (void)
18157 {
18158 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18159 struct neon_type_el et = neon_check_type (2, rs,
18160 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18161 neon_two_same (neon_quad (rs), 1, et.size);
18162 }
18163
18164 static void
18165 do_neon_clz (void)
18166 {
18167 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18168 struct neon_type_el et = neon_check_type (2, rs,
18169 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
18170 neon_two_same (neon_quad (rs), 1, et.size);
18171 }
18172
18173 static void
18174 do_neon_cnt (void)
18175 {
18176 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18177 struct neon_type_el et = neon_check_type (2, rs,
18178 N_EQK | N_INT, N_8 | N_KEY);
18179 neon_two_same (neon_quad (rs), 1, et.size);
18180 }
18181
18182 static void
18183 do_neon_swp (void)
18184 {
18185 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18186 neon_two_same (neon_quad (rs), 1, -1);
18187 }
18188
18189 static void
18190 do_neon_tbl_tbx (void)
18191 {
18192 unsigned listlenbits;
18193 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
18194
18195 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
18196 {
18197 first_error (_("bad list length for table lookup"));
18198 return;
18199 }
18200
18201 listlenbits = inst.operands[1].imm - 1;
18202 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18203 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18204 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
18205 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
18206 inst.instruction |= LOW4 (inst.operands[2].reg);
18207 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
18208 inst.instruction |= listlenbits << 8;
18209
18210 neon_dp_fixup (&inst);
18211 }
18212
/* Encode VLDM/VSTM and their decrement-before (DB) variants.
   Single-precision register lists are handed off to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  /* Each double-precision register occupies two transfer words.  */
  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
18242
/* Encode VLDR/VSTR of a single- or double-precision VFP register by
   dispatching to the VFP load/store encoders, with PC-use diagnostics
   for stores.  */

static void
do_neon_ldr_str (void)
{
  /* Bit 20 is the load/store (L) bit.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
18279
/* Encode the Armv8.1-M Mainline T32 VLDR/VSTR (System Register) form.  */

static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* NOTE(review): the checks below set inst.error without returning, so
     a later failing check overwrites the message of an earlier one; only
     the last diagnostic is reported.  */

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset immediate is a 7-bit quantity (before scaling).  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* The system register number is split: low three bits into [15:13],
     top bit into bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
18306
18307 static void
18308 do_vldr_vstr (void)
18309 {
18310 bfd_boolean sysreg_op = !inst.operands[0].isreg;
18311
18312 /* VLDR/VSTR (System Register). */
18313 if (sysreg_op)
18314 {
18315 if (!mark_feature_used (&arm_ext_v8_1m_main))
18316 as_bad (_("Instruction not permitted on this architecture"));
18317
18318 do_t_vldr_vstr_sysreg ();
18319 }
18320 /* VLDR/VSTR. */
18321 else
18322 {
18323 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
18324 as_bad (_("Instruction not permitted on this architecture"));
18325 do_neon_ldr_str ();
18326 }
18327 }
18328
18329 /* "interleave" version also handles non-interleaving register VLD1/VST1
18330 instructions. */
18331
/* Encode the alignment, element-size and type fields of the
   multiple-structure VLD<n>/VST<n> forms.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The permitted alignment depends on the list length; 128- and 256-bit
     alignment need lists of 2/4 or 4 registers respectively.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
18397
18398 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18399 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18400 otherwise. The variable arguments are a list of pairs of legal (size, align)
18401 values, terminated with -1. */
18402
18403 static int
18404 neon_alignment_bit (int size, int align, int *do_alignment, ...)
18405 {
18406 va_list ap;
18407 int result = FAIL, thissize, thisalign;
18408
18409 if (!inst.operands[1].immisalign)
18410 {
18411 *do_alignment = 0;
18412 return SUCCESS;
18413 }
18414
18415 va_start (ap, do_alignment);
18416
18417 do
18418 {
18419 thissize = va_arg (ap, int);
18420 if (thissize == -1)
18421 break;
18422 thisalign = va_arg (ap, int);
18423
18424 if (size == thissize && align == thisalign)
18425 result = SUCCESS;
18426 }
18427 while (result != SUCCESS);
18428
18429 va_end (ap);
18430
18431 if (result == SUCCESS)
18432 *do_alignment = 1;
18433 else
18434 first_error (_("unsupported alignment for instruction"));
18435
18436 return result;
18437 }
18438
/* Encode the single-structure-to-one-lane VLD<n>/VST<n> forms: lane
   index, alignment and register-stride fields.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the template opcode.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, align) pairs and its own
     packing of the alignment into the index field.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
		      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
18523
18524 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18525
/* Encode the single-structure-to-all-lanes VLD<n> forms: list length,
   register stride, element size and alignment fields.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one comes from bits [9:8] of the template opcode.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a special size
	   encoding (0b11).  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment bit itself is bit 4.  */
  inst.instruction |= do_alignment << 4;
}
18598
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
18601
18602 static void
18603 do_neon_ldx_stx (void)
18604 {
18605 if (inst.operands[1].isreg)
18606 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
18607
18608 switch (NEON_LANE (inst.operands[0].imm))
18609 {
18610 case NEON_INTERLEAVE_LANES:
18611 NEON_ENCODE (INTERLV, inst);
18612 do_neon_ld_st_interleave ();
18613 break;
18614
18615 case NEON_ALL_LANES:
18616 NEON_ENCODE (DUP, inst);
18617 if (inst.instruction == N_INV)
18618 {
18619 first_error ("only loads support such operands");
18620 break;
18621 }
18622 do_neon_ld_dup ();
18623 break;
18624
18625 default:
18626 NEON_ENCODE (LANE, inst);
18627 do_neon_ld_st_lane ();
18628 }
18629
18630 /* L bit comes from bit mask. */
18631 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18632 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18633 inst.instruction |= inst.operands[1].reg << 16;
18634
18635 if (inst.operands[1].postind)
18636 {
18637 int postreg = inst.operands[1].imm & 0xf;
18638 constraint (!inst.operands[1].immisreg,
18639 _("post-index must be a register"));
18640 constraint (postreg == 0xd || postreg == 0xf,
18641 _("bad register for post-index"));
18642 inst.instruction |= postreg;
18643 }
18644 else
18645 {
18646 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
18647 constraint (inst.relocs[0].exp.X_op != O_constant
18648 || inst.relocs[0].exp.X_add_number != 0,
18649 BAD_ADDR_MODE);
18650
18651 if (inst.operands[1].writeback)
18652 {
18653 inst.instruction |= 0xd;
18654 }
18655 else
18656 inst.instruction |= 0xf;
18657 }
18658
18659 if (thumb_mode)
18660 inst.instruction |= 0xf9000000;
18661 else
18662 inst.instruction |= 0xf4000000;
18663 }
18664
18665 /* FP v8. */
18666 static void
18667 do_vfp_nsyn_fpv8 (enum neon_shape rs)
18668 {
18669 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18670 D register operands. */
18671 if (neon_shape_class[rs] == SC_DOUBLE)
18672 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
18673 _(BAD_FPU));
18674
18675 NEON_ENCODE (FPV8, inst);
18676
18677 if (rs == NS_FFF || rs == NS_HHH)
18678 {
18679 do_vfp_sp_dyadic ();
18680
18681 /* ARMv8.2 fp16 instruction. */
18682 if (rs == NS_HHH)
18683 do_scalar_fp16_v82_encode ();
18684 }
18685 else
18686 do_vfp_dp_rd_rn_rm ();
18687
18688 if (rs == NS_DDD)
18689 inst.instruction |= 0x100;
18690
18691 inst.instruction |= 0xf0000000;
18692 }
18693
18694 static void
18695 do_vsel (void)
18696 {
18697 set_pred_insn_type (OUTSIDE_PRED_INSN);
18698
18699 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
18700 first_error (_("invalid instruction shape"));
18701 }
18702
18703 static void
18704 do_vmaxnm (void)
18705 {
18706 set_pred_insn_type (OUTSIDE_PRED_INSN);
18707
18708 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
18709 return;
18710
18711 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
18712 return;
18713
18714 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
18715 }
18716
/* Core encoder for the VRINT family.  MODE selects the rounding variant;
   both the VFP (scalar) and Neon (vector) encodings are handled here.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m variants have a fixed rounding mode encoded in the
	 instruction and cannot be conditional.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* sz bit: set for double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_pred_insn_type (OUTSIDE_PRED_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7]; mode 'r' has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
18808
static void
do_vrintx (void)
{
  /* VRINTX: vrint with rounding mode 'x'.  */
  do_vrint_1 (neon_cvt_mode_x);
}
18814
static void
do_vrintz (void)
{
  /* VRINTZ: vrint with rounding mode 'z'.  */
  do_vrint_1 (neon_cvt_mode_z);
}
18820
static void
do_vrintr (void)
{
  /* VRINTR: vrint with rounding mode 'r' (VFP form only).  */
  do_vrint_1 (neon_cvt_mode_r);
}
18826
static void
do_vrinta (void)
{
  /* VRINTA: vrint with rounding mode 'a'.  */
  do_vrint_1 (neon_cvt_mode_a);
}
18832
static void
do_vrintn (void)
{
  /* VRINTN: vrint with rounding mode 'n'.  */
  do_vrint_1 (neon_cvt_mode_n);
}
18838
static void
do_vrintp (void)
{
  /* VRINTP: vrint with rounding mode 'p'.  */
  do_vrint_1 (neon_cvt_mode_p);
}
18844
static void
do_vrintm (void)
{
  /* VRINTM: vrint with rounding mode 'm'.  */
  do_vrint_1 (neon_cvt_mode_m);
}
18850
/* Pack the register/index pair of scalar operand OPND for VCMLA with
   element size ELSIZE, or raise an error if it is not encodable.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      /* fp16: two elements per doubleword, registers D0-D15 only.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
      break;

    case 32:
      /* fp32: only index 0 is encodable.  */
      if (idx == 0)
	return reg;
      break;

    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
18865
/* Encode VCMLA (complex multiply-accumulate with rotation), in either the
   by-scalar or the three-same register form.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* The rotation is encoded in multiples of 90 degrees.  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-scalar) form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-same register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
18907
/* Encode VCADD (complex add with rotation of 90 or 270 degrees).  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  /* Single 'rot' bit: 0 for 90 degrees, 1 for 270.  */
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
18926
18927 /* Dot Product instructions encoding support. */
18928
/* Encode VSDOT/VUDOT.  UNSIGNED_P selects the unsigned variant.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
18983
18984 /* Dot Product instructions for signed integer. */
18985
/* Dot Product instruction for signed integers (VSDOT).  */
static void
do_neon_dotproduct_s (void)
{
  /* Plain call rather than `return <void expr>;' — returning a void
     expression from a void function is a GNU extension, not ISO C.  */
  do_neon_dotproduct (0);
}
18991
18992 /* Dot Product instructions for unsigned integer. */
18993
/* Dot Product instruction for unsigned integers (VUDOT).  */
static void
do_neon_dotproduct_u (void)
{
  /* Plain call rather than `return <void expr>;' — returning a void
     expression from a void function is a GNU extension, not ISO C.  */
  do_neon_dotproduct (1);
}
18999
19000 /* Crypto v1 instructions. */
/* Encode a two-operand crypto instruction (AES/SHA families).  ELTTYPE is
   the required element type; OP selects the variant in bits [7:6], or -1
   when the opcode has no such field.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
19025
19026 static void
19027 do_crypto_3op_1 (int u, int op)
19028 {
19029 set_pred_insn_type (OUTSIDE_PRED_INSN);
19030
19031 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
19032 N_32 | N_UNT | N_KEY).type == NT_invtype)
19033 return;
19034
19035 inst.error = NULL;
19036
19037 NEON_ENCODE (INTEGER, inst);
19038 neon_three_same (1, u, 8 << op);
19039 }
19040
static void
do_aese (void)
{
  /* AESE: crypto 2-op on 8-bit elements, variant 0.  */
  do_crypto_2op_1 (N_8, 0);
}
19046
static void
do_aesd (void)
{
  /* AESD: crypto 2-op on 8-bit elements, variant 1.  */
  do_crypto_2op_1 (N_8, 1);
}
19052
static void
do_aesmc (void)
{
  /* AESMC: crypto 2-op on 8-bit elements, variant 2.  */
  do_crypto_2op_1 (N_8, 2);
}
19058
static void
do_aesimc (void)
{
  /* AESIMC: crypto 2-op on 8-bit elements, variant 3.  */
  do_crypto_2op_1 (N_8, 3);
}
19064
static void
do_sha1c (void)
{
  /* SHA1C: crypto 3-op, u=0 op=0.  */
  do_crypto_3op_1 (0, 0);
}
19070
static void
do_sha1p (void)
{
  /* SHA1P: crypto 3-op, u=0 op=1.  */
  do_crypto_3op_1 (0, 1);
}
19076
static void
do_sha1m (void)
{
  /* SHA1M: crypto 3-op, u=0 op=2.  */
  do_crypto_3op_1 (0, 2);
}
19082
static void
do_sha1su0 (void)
{
  /* SHA1SU0: crypto 3-op, u=0 op=3.  */
  do_crypto_3op_1 (0, 3);
}
19088
static void
do_sha256h (void)
{
  /* SHA256H: crypto 3-op, u=1 op=0.  */
  do_crypto_3op_1 (1, 0);
}
19094
static void
do_sha256h2 (void)
{
  /* SHA256H2: crypto 3-op, u=1 op=1.  */
  do_crypto_3op_1 (1, 1);
}
19100
static void
do_sha256su1 (void)
{
  /* SHA256SU1: crypto 3-op, u=1 op=2.  */
  do_crypto_3op_1 (1, 2);
}
19106
static void
do_sha1h (void)
{
  /* SHA1H: crypto 2-op on 32-bit elements, no variant field.  */
  do_crypto_2op_1 (N_32, -1);
}
19112
static void
do_sha1su1 (void)
{
  /* SHA1SU1: crypto 2-op on 32-bit elements, variant 0.  */
  do_crypto_2op_1 (N_32, 0);
}
19118
static void
do_sha256su0 (void)
{
  /* SHA256SU0: crypto 2-op on 32-bit elements, variant 1.  */
  do_crypto_2op_1 (N_32, 1);
}
19124
/* Encode one CRC32 variant.  POLY selects the polynomial (0: CRC32,
   1: CRC32C) and SZ the size field; field positions differ between the
   ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC in any operand is unpredictable, not undefined: warn only.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
19142
static void
do_crc32b (void)
{
  /* CRC32B: CRC32 polynomial, byte.  */
  do_crc32_1 (0, 0);
}
19148
static void
do_crc32h (void)
{
  /* CRC32H: CRC32 polynomial, halfword.  */
  do_crc32_1 (0, 1);
}
19154
static void
do_crc32w (void)
{
  /* CRC32W: CRC32 polynomial, word.  */
  do_crc32_1 (0, 2);
}
19160
static void
do_crc32cb (void)
{
  /* CRC32CB: CRC32C polynomial, byte.  */
  do_crc32_1 (1, 0);
}
19166
static void
do_crc32ch (void)
{
  /* CRC32CH: CRC32C polynomial, halfword.  */
  do_crc32_1 (1, 1);
}
19172
static void
do_crc32cw (void)
{
  /* CRC32CW: CRC32C polynomial, word.  */
  do_crc32_1 (1, 2);
}
19178
/* Encode VJCVT (double to signed 32-bit with Javascript semantics);
   requires the ARMv8 VFP extension.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
19188
19189 \f
19190 /* Overall per-instruction processing. */
19191
19192 /* We need to be able to fix up arbitrary expressions in some statements.
19193 This is so that we can handle symbols that are an arbitrary distance from
19194 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
19195 which returns part of an address in a form which will be valid for
19196 a data instruction. We do this by pushing the expression into a symbol
19197 in the expr_section, and creating a fix for that. */
19198
/* Create a fixup of SIZE bytes at FRAG+WHERE for expression EXP with
   relocation type RELOC; PC_REL is non-zero for pc-relative fixes.
   Constant expressions that are pc-relative are first wrapped in an
   absolute-valued symbol so there is something to relocate against.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long) exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex goes through an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
19252
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and an offset for
     frag_var; complex expressions get an expression symbol.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
19284
19285 /* Write a 32-bit thumb instruction to buf. */
19286 static void
19287 put_thumb32_insn (char * buf, unsigned long insn)
19288 {
19289 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
19290 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
19291 }
19292
/* Emit the assembled instruction in `inst' to the current frag, creating
   any fixups its relocations require.  STR is the source line, used only
   for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions go through a variant frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* A double-size ARM insn is the same word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
19343
19344 static char *
19345 output_it_inst (int cond, int mask, char * to)
19346 {
19347 unsigned long instruction = 0xbf00;
19348
19349 mask &= 0xf;
19350 instruction |= mask;
19351 instruction |= cond << 4;
19352
19353 if (to == NULL)
19354 {
19355 to = frag_more (2);
19356 #ifdef OBJ_ELF
19357 dwarf2_emit_insn (2);
19358 #endif
19359 }
19360
19361 md_number_to_chars (to, instruction, 2);
19362
19363 return to;
19364 }
19365
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a mnemonic accepts a condition code affix.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19400
19401 /* Subroutine of md_assemble, responsible for looking up the primary
19402 opcode from the mnemonic the user wrote. STR points to the
19403 beginning of the mnemonic.
19404
19405 This is not simply a hash table lookup, because of conditional
19406 variants. Most instructions have conditional variants, which are
19407 expressed with a _conditional affix_ to the mnemonic. If we were
19408 to encode each conditional variant as a literal string in the opcode
19409 table, it would have approximately 20,000 entries.
19410
19411 Most mnemonics take this affix as a suffix, and in unified syntax,
19412 'most' is upgraded to 'all'. However, in the divided syntax, some
19413 instructions take the affix as an infix, notably the s-variants of
19414 the arithmetic instructions. Of those instructions, all but six
19415 have the infix appear after the third character of the mnemonic.
19416
19417 Accordingly, the algorithm for looking up primary opcodes given
19418 an identifier is:
19419
19420 1. Look up the identifier in the opcode table.
19421 If we find a match, go to step U.
19422
19423 2. Look up the last two characters of the identifier in the
19424 conditions table. If we find a match, look up the first N-2
19425 characters of the identifier in the opcode table. If we
19426 find a match, go to step CE.
19427
19428 3. Look up the fourth and fifth characters of the identifier in
19429 the conditions table. If we find a match, extract those
19430 characters from the identifier, and look up the remaining
19431 characters in the opcode table. If we find a match, go
19432 to step CM.
19433
19434 4. Fail.
19435
19436 U. Examine the tag field of the opcode structure, in case this is
19437 one of the six instructions with its conditional infix in an
19438 unusual place. If it is, the tag tells us where to find the
19439 infix; look it up in the conditions table and set inst.cond
19440 accordingly. Otherwise, this is an unconditional instruction.
19441 Again set inst.cond accordingly. Return the opcode structure.
19442
19443 CE. Examine the tag field to make sure this is an instruction that
19444 should receive a conditional suffix. If it is not, fail.
19445 Otherwise, set inst.cond from the suffix we already looked up,
19446 and return the opcode structure.
19447
19448 CM. Examine the tag field to make sure this is an instruction that
19449 should receive a conditional infix after the third character.
19450 If it is not, fail. Otherwise, undo the edits to the current
19451 line of input and proceed as for case CE. */
19452
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* Unusual-position infix: the tag encodes where the infix sits.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters (the vector predication suffix is one character).  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily remove the two infix characters and retry the lookup,
     restoring the input buffer afterwards.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
19627
19628 /* This function generates an initial IT instruction, leaving its block
19629 virtually open for the new instructions. Eventually,
19630 the mask will be updated by now_pred_add_mask () each time
19631 a new instruction needs to be included in the IT block.
19632 Finally, the block is closed with close_automatic_it_block ().
19633 The block closure can be requested either from md_assemble (),
19634 a tencode (), or due to a label hook. */
19635
19636 static void
19637 new_automatic_it_block (int cond)
19638 {
19639 now_pred.state = AUTOMATIC_PRED_BLOCK;
19640 now_pred.mask = 0x18;
19641 now_pred.cc = cond;
19642 now_pred.block_length = 1;
19643 mapping_state (MAP_THUMB);
19644 now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
19645 now_pred.warn_deprecated = FALSE;
19646 now_pred.insn_cond = TRUE;
19647 }
19648
19649 /* Close an automatic IT block.
19650 See comments in new_automatic_it_block (). */
19651
19652 static void
19653 close_automatic_it_block (void)
19654 {
19655 now_pred.mask = 0x10;
19656 now_pred.block_length = 0;
19657 }
19658
19659 /* Update the mask of the current automatically-generated IT
19660 instruction. See comments in new_automatic_it_block (). */
19661
19662 static void
19663 now_pred_add_mask (int cond)
19664 {
19665 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19666 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19667 | ((bitvalue) << (nbit)))
19668 const int resulting_bit = (cond & 1);
19669
19670 now_pred.mask &= 0xf;
19671 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
19672 resulting_bit,
19673 (5 - now_pred.block_length));
19674 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
19675 1,
19676 ((5 - now_pred.block_length) - 1));
19677 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
19678
19679 #undef CLEAR_BIT
19680 #undef SET_BIT_VALUE
19681 }
19682
/* The IT blocks handling machinery is accessed through these functions:
19684 it_fsm_pre_encode () from md_assemble ()
19685 set_pred_insn_type () optional, from the tencode functions
19686 set_pred_insn_type_last () ditto
19687 in_pred_block () ditto
19688 it_fsm_post_encode () from md_assemble ()
19689 force_automatic_it_block_close () from label handling functions
19690
19691 Rationale:
19692 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19693 initializing the IT insn type with a generic initial value depending
19694 on the inst.condition.
19695 2) During the tencode function, two things may happen:
19696 a) The tencode function overrides the IT insn type by
19697 calling either set_pred_insn_type (type) or
19698 set_pred_insn_type_last ().
19699 b) The tencode function queries the IT block state by
19700 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19701
19702 Both set_pred_insn_type and in_pred_block run the internal FSM state
19703 handling function (handle_pred_state), because: a) setting the IT insn
19704 type may incur in an invalid state (exiting the function),
19705 and b) querying the state requires the FSM to be updated.
19706 Specifically we want to avoid creating an IT block for conditional
19707 branches, so it_fsm_pre_encode is actually a guess and we can't
19708 determine whether an IT block is required until the tencode () routine
     has decided what type of instruction this actually is.
19710 Because of this, if set_pred_insn_type and in_pred_block have to be
19711 used, set_pred_insn_type has to be called first.
19712
19713 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19714 that determines the insn IT type depending on the inst.cond code.
19715 When a tencode () routine encodes an instruction that can be
19716 either outside an IT block, or, in the case of being inside, has to be
19717 the last one, set_pred_insn_type_last () will determine the proper
19718 IT instruction type based on the inst.cond code. Otherwise,
19719 set_pred_insn_type can be called for overriding that logic or
19720 for covering other cases.
19721
19722 Calling handle_pred_state () may not transition the IT block state to
19723 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19724 still queried. Instead, if the FSM determines that the state should
19725 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19726 after the tencode () function: that's what it_fsm_post_encode () does.
19727
19728 Since in_pred_block () calls the state handling function to get an
19729 updated state, an error may occur (due to invalid insns combination).
19730 In that case, inst.error is set.
19731 Therefore, inst.error has to be checked after the execution of
19732 the tencode () routine.
19733
19734 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19735 any pending state change (if any) that didn't take place in
19736 handle_pred_state () as explained above. */
19737
19738 static void
19739 it_fsm_pre_encode (void)
19740 {
19741 if (inst.cond != COND_ALWAYS)
19742 inst.pred_insn_type = INSIDE_IT_INSN;
19743 else
19744 inst.pred_insn_type = OUTSIDE_PRED_INSN;
19745
19746 now_pred.state_handled = 0;
19747 }
19748
19749 /* IT state FSM handling function. */
19750 /* MVE instructions and non-MVE instructions are handled differently because of
19751 the introduction of VPT blocks.
19752 Specifications say that any non-MVE instruction inside a VPT block is
19753 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19754 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19755 few exceptions we have MVE_UNPREDICABLE_INSN.
   The error messages provided for each of the possible combinations
   are described in the cases below:
19758 For 'most' MVE instructions:
19759 1) In an IT block, with an IT code: syntax error
19760 2) In an IT block, with a VPT code: error: must be in a VPT block
19761 3) In an IT block, with no code: warning: UNPREDICTABLE
19762 4) In a VPT block, with an IT code: syntax error
19763 5) In a VPT block, with a VPT code: OK!
19764 6) In a VPT block, with no code: error: missing code
19765 7) Outside a pred block, with an IT code: error: syntax error
19766 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19767 9) Outside a pred block, with no code: OK!
19768 For non-MVE instructions:
19769 10) In an IT block, with an IT code: OK!
19770 11) In an IT block, with a VPT code: syntax error
19771 12) In an IT block, with no code: error: missing code
19772 13) In a VPT block, with an IT code: error: should be in an IT block
19773 14) In a VPT block, with a VPT code: syntax error
19774 15) In a VPT block, with no code: UNPREDICTABLE
19775 16) Outside a pred block, with an IT code: error: should be in an IT block
19776 17) Outside a pred block, with a VPT code: syntax error
19777 18) Outside a pred block, with no code: OK!
19778 */
19779
19780
/* Run one step of the IT/VPT predication FSM for the instruction just
   parsed.  Returns SUCCESS or FAIL; on FAIL, inst.error has been set.
   The numbered "Case N" comments refer to the table in the comment
   above this function.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK!  */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK!  */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  /* ARM mode: at most warn, since ARM insns carry their own
		     condition field.  */
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* NOTE(review): if the assert above is ever compiled out, control
	     falls through into the next cases.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  /* VPT/VPST opens a manual vector predication block.  */
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* IT opens a manual scalar predication block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  /* MVE insns never occur inside an automatic (implicit) IT block.  */
	  gas_assert (0);
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it; unless the insn
		 conditionally closes the block, open a fresh one.  */
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  /* An insn that must be last in its IT block closes the block
	     immediately (unless the block was just re-opened above).  */
	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and starts a manual
	     one.  */
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	int cond, is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    /* Vector (VPT) block: advance the mask and derive the expected
	       condition for this slot.  */
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	    /* NOTE(review): reachable only if the asserts above are compiled
	       out; control then falls into the next case.  */
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK!  */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* This insn is only valid as the last one of the block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Nesting IT inside IT is always an error.  */
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
      }
      break;
    }

  return SUCCESS;
}
20145
/* One class of Thumb instructions whose use inside an IT block is
   performance-deprecated: an encoding PATTERN matched under MASK, plus a
   human-readable DESCRIPTION for the diagnostic.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Encoding bits identifying the class.  */
  unsigned long mask;		/* Which bits of PATTERN are significant.  */
  const char* description;	/* Text inserted into the warning message.  */
};
20152
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode (); terminated by a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
20167
20168 static void
20169 it_fsm_post_encode (void)
20170 {
20171 int is_last;
20172
20173 if (!now_pred.state_handled)
20174 handle_pred_state ();
20175
20176 if (now_pred.insn_cond
20177 && !now_pred.warn_deprecated
20178 && warn_on_deprecated
20179 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
20180 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
20181 {
20182 if (inst.instruction >= 0x10000)
20183 {
20184 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
20185 "performance deprecated in ARMv8-A and ARMv8-R"));
20186 now_pred.warn_deprecated = TRUE;
20187 }
20188 else
20189 {
20190 const struct depr_insn_mask *p = depr_it_insns;
20191
20192 while (p->mask != 0)
20193 {
20194 if ((inst.instruction & p->mask) == p->pattern)
20195 {
20196 as_tsktsk (_("IT blocks containing 16-bit Thumb "
20197 "instructions of the following class are "
20198 "performance deprecated in ARMv8-A and "
20199 "ARMv8-R: %s"), p->description);
20200 now_pred.warn_deprecated = TRUE;
20201 break;
20202 }
20203
20204 ++p;
20205 }
20206 }
20207
20208 if (now_pred.block_length > 1)
20209 {
20210 as_tsktsk (_("IT blocks containing more than one conditional "
20211 "instruction are performance deprecated in ARMv8-A and "
20212 "ARMv8-R"));
20213 now_pred.warn_deprecated = TRUE;
20214 }
20215 }
20216
20217 is_last = (now_pred.mask == 0x10);
20218 if (is_last)
20219 {
20220 now_pred.state = OUTSIDE_PRED_BLOCK;
20221 now_pred.mask = 0;
20222 }
20223 }
20224
20225 static void
20226 force_automatic_it_block_close (void)
20227 {
20228 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
20229 {
20230 close_automatic_it_block ();
20231 now_pred.state = OUTSIDE_PRED_BLOCK;
20232 now_pred.mask = 0;
20233 }
20234 }
20235
20236 static int
20237 in_pred_block (void)
20238 {
20239 if (!now_pred.state_handled)
20240 handle_pred_state ();
20241
20242 return now_pred.state != OUTSIDE_PRED_BLOCK;
20243 }
20244
20245 /* Whether OPCODE only has T32 encoding. Since this function is only used by
20246 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
20247 here, hence the "known" in the function name. */
20248
20249 static bfd_boolean
20250 known_t32_only_insn (const struct asm_opcode *opcode)
20251 {
20252 /* Original Thumb-1 wide instruction. */
20253 if (opcode->tencode == do_t_blx
20254 || opcode->tencode == do_t_branch23
20255 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
20256 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
20257 return TRUE;
20258
20259 /* Wide-only instruction added to ARMv8-M Baseline. */
20260 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
20261 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
20262 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
20263 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
20264 return TRUE;
20265
20266 return FALSE;
20267 }
20268
20269 /* Whether wide instruction variant can be used if available for a valid OPCODE
20270 in ARCH. */
20271
20272 static bfd_boolean
20273 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
20274 {
20275 if (known_t32_only_insn (opcode))
20276 return TRUE;
20277
20278 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
20279 of variant T3 of B.W is checked in do_t_branch. */
20280 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
20281 && opcode->tencode == do_t_branch)
20282 return TRUE;
20283
20284 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
20285 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
20286 && opcode->tencode == do_t_mov_cmp
20287 /* Make sure CMP instruction is not affected. */
20288 && opcode->aencode == do_mov)
20289 return TRUE;
20290
20291 /* Wide instruction variants of all instructions with narrow *and* wide
20292 variants become available with ARMv6t2. Other opcodes are either
20293 narrow-only or wide-only and are thus available if OPCODE is valid. */
20294 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
20295 return TRUE;
20296
20297 /* OPCODE with narrow only instruction variant or wide variant not
20298 available. */
20299 return FALSE;
20300 }
20301
/* Assemble one instruction from source line STR.  Looks up the mnemonic,
   dispatches to the Thumb or ARM operand-parse/encode path, runs the IT/VPT
   FSM around the encode routine, and finally emits the instruction via
   output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings never fall in the 0xe800-0xffff range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
20496
20497 static void
20498 check_pred_blocks_finished (void)
20499 {
20500 #ifdef OBJ_ELF
20501 asection *sect;
20502
20503 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
20504 if (seg_info (sect)->tc_segment_info_data.current_pred.state
20505 == MANUAL_PRED_BLOCK)
20506 {
20507 if (now_pred.type == SCALAR_PRED)
20508 as_warn (_("section '%s' finished with an open IT block."),
20509 sect->name);
20510 else
20511 as_warn (_("section '%s' finished with an open VPT/VPST block."),
20512 sect->name);
20513 }
20514 #else
20515 if (now_pred.state == MANUAL_PRED_BLOCK)
20516 {
20517 if (now_pred.type == SCALAR_PRED)
20518 as_warn (_("file finished with an open IT block."));
20519 else
20520 as_warn (_("file finished with an open VPT/VPST block."));
20521 }
20522 #endif
20523 }
20524
/* Various frobbings of labels and their addresses.  */

void
arm_start_line_hook (void)
{
  /* Forget the label from the previous line: a label is only associated
     with the instruction that immediately follows it.  */
  last_label_seen = NULL;
}
20532
/* Hook run when label SYM is defined: remember it for alignment by
   md_assemble (), tag it with the current instruction-set mode, close any
   open automatic IT block, and mark Thumb function entry points.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Record whether the label was defined in Thumb or ARM code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20591
20592 bfd_boolean
20593 arm_data_in_code (void)
20594 {
20595 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
20596 {
20597 *input_line_pointer = '/';
20598 input_line_pointer += 5;
20599 *input_line_pointer = 0;
20600 return TRUE;
20601 }
20602
20603 return FALSE;
20604 }
20605
20606 char *
20607 arm_canonicalize_symbol_name (char * name)
20608 {
20609 int len;
20610
20611 if (thumb_mode && (len = strlen (name)) > 5
20612 && streq (name + len - 5, "/data"))
20613 *(name + len - 5) = 0;
20614
20615 return name;
20616 }
20617 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One reg_entry: spelling S, encoded number N, register type T.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register named by prefix P followed by its number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the stored register number is 2*N.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half P16..P31 of a 32-register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers P0..P15 whose stored numbers are doubled.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR names for mode BANK, in both cases, encoded as
   768 | (register << 16), with SPSR_BIT set for the SPSR entries.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20648
20649 static const struct reg_entry reg_names[] =
20650 {
20651 /* ARM integer registers. */
20652 REGSET(r, RN), REGSET(R, RN),
20653
20654 /* ATPCS synonyms. */
20655 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
20656 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
20657 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
20658
20659 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
20660 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
20661 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
20662
20663 /* Well-known aliases. */
20664 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
20665 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
20666
20667 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
20668 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
20669
20670 /* Coprocessor numbers. */
20671 REGSET(p, CP), REGSET(P, CP),
20672
20673 /* Coprocessor register numbers. The "cr" variants are for backward
20674 compatibility. */
20675 REGSET(c, CN), REGSET(C, CN),
20676 REGSET(cr, CN), REGSET(CR, CN),
20677
20678 /* ARM banked registers. */
20679 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
20680 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
20681 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
20682 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
20683 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
20684 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
20685 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
20686
20687 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
20688 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
20689 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
20690 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
20691 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
20692 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
20693 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
20694 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
20695
20696 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
20697 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
20698 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
20699 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
20700 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
20701 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
20702 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
20703 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
20704 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
20705
20706 /* FPA registers. */
20707 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
20708 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
20709
20710 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
20711 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
20712
20713 /* VFP SP registers. */
20714 REGSET(s,VFS), REGSET(S,VFS),
20715 REGSETH(s,VFS), REGSETH(S,VFS),
20716
20717 /* VFP DP Registers. */
20718 REGSET(d,VFD), REGSET(D,VFD),
20719 /* Extra Neon DP registers. */
20720 REGSETH(d,VFD), REGSETH(D,VFD),
20721
20722 /* Neon QP registers. */
20723 REGSET2(q,NQ), REGSET2(Q,NQ),
20724
20725 /* VFP control registers. */
20726 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
20727 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
20728 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
20729 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
20730 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
20731 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
20732 REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
20733
20734 /* Maverick DSP coprocessor registers. */
20735 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
20736 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
20737
20738 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
20739 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
20740 REGDEF(dspsc,0,DSPSC),
20741
20742 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
20743 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
20744 REGDEF(DSPSC,0,DSPSC),
20745
20746 /* iWMMXt data registers - p0, c0-15. */
20747 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
20748
20749 /* iWMMXt control registers - p1, c0-3. */
20750 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
20751 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
20752 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
20753 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
20754
20755 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
20756 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
20757 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
20758 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
20759 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
20760
20761 /* XScale accumulator registers. */
20762 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
20763 };
20764 #undef REGDEF
20765 #undef REGNUM
20766 #undef REGSET
20767
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every permutation of two, three and four
     of the flags f, s, x and c is listed so that any ordering the
     user writes is accepted.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
20846
/* Table of V7M psr names.  Each name is listed in lower and upper case
   (plus one mixed-case alias for xPSR).  NOTE(review): values appear to
   be the MRS/MSR SYSm encodings, with the _NS (non-secure) variants
   being the base value plus 0x80 — confirm against the Arm ARM.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  /* Note: 3 == 0x3, identical to xpsr/XPSR above.  */
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
20877
/* Table of all shift-in-operand names, mapping each spelling (lower
   and upper case) to its SHIFT_* code.  "asl" is accepted as a
   synonym for "lsl" (same SHIFT_LSL code).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
20889
/* Table of all explicit relocation names, keyed by the specifier the
   user writes in an operand.  Every specifier is listed in both lower
   and upper case.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fix: the uppercase variant was misspelled "GOTTPOFF_FDIC", so the
     correctly spelled uppercase specifier was never recognized.  Add
     the correct spelling; keep the old misspelling for backward
     compatibility with any source that relied on it.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
  { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },	/* Deprecated misspelling.  */
};
#endif
20923
/* Table of all conditional affixes, mapping each suffix spelling to
   its 4-bit condition-code value.  Aliases share a value: hs == cs
   (0x2); cc == ul == lo (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Vector-predication suffixes "t" (then) and "e" (else); their values
   lie outside the normal 4-bit condition range.  NOTE(review):
   presumably for MVE VPT-block predication — confirm against users.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
20948
/* Emit one barrier-option entry in each of lower and upper case, with
   the 4-bit option encoding CODE and the architecture feature FEAT
   required to use it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DMB/DSB barrier option names.  The "ld" variants (ld,
   ishld, nshld, oshld) require ARMv8; the rest only need the basic
   barrier support.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
20974
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3 but the infix form is deprecated (warns when used).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* One entry whose mnemonic is m1+m2+m3; the tag distinguishes the
   bare (no-infix) form from a form with the condition at offset
   sizeof (m1) - 1 inside the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand a mnemonic with every condition-code infix spelled out
   explicitly (including the hs/ul-style aliases).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,	\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* An MVE mnemonic (mve_p == 1) that indirects through M_MNEM
   enumerators for both opcode slots and takes an OT_csuffixF
   conditional suffix.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets an entry name "0" as its encoder: do_##ae with ae == 0 expands
   to do_0, i.e. a null encoding-function pointer.  */
#define do_0 0
21206
21207 static const struct asm_opcode insns[] =
21208 {
21209 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
21210 #define THUMB_VARIANT & arm_ext_v4t
21211 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
21212 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
21213 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
21214 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
21215 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
21216 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
21217 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
21218 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
21219 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
21220 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
21221 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
21222 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
21223 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
21224 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
21225 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
21226 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
21227
21228 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
21229 for setting PSR flag bits. They are obsolete in V6 and do not
21230 have Thumb equivalents. */
21231 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
21232 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
21233 CL("tstp", 110f000, 2, (RR, SH), cmp),
21234 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
21235 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
21236 CL("cmpp", 150f000, 2, (RR, SH), cmp),
21237 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
21238 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
21239 CL("cmnp", 170f000, 2, (RR, SH), cmp),
21240
21241 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
21242 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
21243 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
21244 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
21245
21246 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
21247 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
21248 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
21249 OP_RRnpc),
21250 OP_ADDRGLDR),ldst, t_ldst),
21251 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
21252
21253 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21254 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21255 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21256 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21257 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21258 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21259
21260 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
21261 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
21262
21263 /* Pseudo ops. */
21264 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
21265 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
21266 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
21267 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
21268
21269 /* Thumb-compatibility pseudo ops. */
21270 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
21271 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
21272 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
21273 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
21274 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
21275 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
21276 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
21277 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
21278 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
21279 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
21280 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
21281 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
21282
21283 /* These may simplify to neg. */
21284 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
21285 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
21286
21287 #undef THUMB_VARIANT
21288 #define THUMB_VARIANT & arm_ext_os
21289
21290 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
21291 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
21292
21293 #undef THUMB_VARIANT
21294 #define THUMB_VARIANT & arm_ext_v6
21295
21296 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
21297
21298 /* V1 instructions with no Thumb analogue prior to V6T2. */
21299 #undef THUMB_VARIANT
21300 #define THUMB_VARIANT & arm_ext_v6t2
21301
21302 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
21303 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
21304 CL("teqp", 130f000, 2, (RR, SH), cmp),
21305
21306 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21307 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21308 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
21309 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
21310
21311 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21312 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21313
21314 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21315 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
21316
21317 /* V1 instructions with no Thumb analogue at all. */
21318 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
21319 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
21320
21321 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
21322 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
21323 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
21324 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
21325 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
21326 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
21327 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
21328 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
21329
21330 #undef ARM_VARIANT
21331 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21332 #undef THUMB_VARIANT
21333 #define THUMB_VARIANT & arm_ext_v4t
21334
21335 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21336 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
21337
21338 #undef THUMB_VARIANT
21339 #define THUMB_VARIANT & arm_ext_v6t2
21340
21341 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21342 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
21343
21344 /* Generic coprocessor instructions. */
21345 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21346 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21347 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21348 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21349 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21350 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21351 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
21352
21353 #undef ARM_VARIANT
21354 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21355
21356 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21357 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
21358
21359 #undef ARM_VARIANT
21360 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21361 #undef THUMB_VARIANT
21362 #define THUMB_VARIANT & arm_ext_msr
21363
21364 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
21365 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
21366
21367 #undef ARM_VARIANT
21368 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21369 #undef THUMB_VARIANT
21370 #define THUMB_VARIANT & arm_ext_v6t2
21371
21372 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21373 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21374 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21375 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21376 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21377 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21378 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
21379 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
21380
21381 #undef ARM_VARIANT
21382 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21383 #undef THUMB_VARIANT
21384 #define THUMB_VARIANT & arm_ext_v4t
21385
21386 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21387 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21388 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21389 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21390 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21391 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
21392
21393 #undef ARM_VARIANT
21394 #define ARM_VARIANT & arm_ext_v4t_5
21395
21396 /* ARM Architecture 4T. */
21397 /* Note: bx (and blx) are required on V5, even if the processor does
21398 not support Thumb. */
21399 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
21400
21401 #undef ARM_VARIANT
21402 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21403 #undef THUMB_VARIANT
21404 #define THUMB_VARIANT & arm_ext_v5t
21405
21406 /* Note: blx has 2 variants; the .value coded here is for
21407 BLX(2). Only this variant has conditional execution. */
21408 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
21409 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
21410
21411 #undef THUMB_VARIANT
21412 #define THUMB_VARIANT & arm_ext_v6t2
21413
21414 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
21415 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21416 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21417 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21418 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
21419 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
21420 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21421 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
21422
21423 #undef ARM_VARIANT
21424 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21425 #undef THUMB_VARIANT
21426 #define THUMB_VARIANT & arm_ext_v5exp
21427
21428 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21429 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21430 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21431 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21432
21433 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21434 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
21435
21436 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21437 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21438 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21439 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
21440
21441 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21442 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21443 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21444 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21445
21446 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21447 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21448
21449 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21450 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21451 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21452 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
21453
21454 #undef ARM_VARIANT
21455 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21456 #undef THUMB_VARIANT
21457 #define THUMB_VARIANT & arm_ext_v6t2
21458
21459 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
21460 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
21461 ldrd, t_ldstd),
21462 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
21463 ADDRGLDRS), ldrd, t_ldstd),
21464
21465 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21466 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21467
21468 #undef ARM_VARIANT
21469 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21470
21471 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
21472
21473 #undef ARM_VARIANT
21474 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21475 #undef THUMB_VARIANT
21476 #define THUMB_VARIANT & arm_ext_v6
21477
21478 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
21479 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
21480 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21481 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21482 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
21483 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21484 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21485 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21486 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21487 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
21488
21489 #undef THUMB_VARIANT
21490 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21491
21492 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
21493 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21494 strex, t_strex),
21495 #undef THUMB_VARIANT
21496 #define THUMB_VARIANT & arm_ext_v6t2
21497
21498 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21499 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
21500
21501 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
21502 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
21503
21504 /* ARM V6 not included in V7M. */
21505 #undef THUMB_VARIANT
21506 #define THUMB_VARIANT & arm_ext_v6_notm
21507 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21508 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21509 UF(rfeib, 9900a00, 1, (RRw), rfe),
21510 UF(rfeda, 8100a00, 1, (RRw), rfe),
21511 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21512 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
21513 UF(rfefa, 8100a00, 1, (RRw), rfe),
21514 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
21515 UF(rfeed, 9900a00, 1, (RRw), rfe),
21516 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21517 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21518 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
21519 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
21520 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
21521 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
21522 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
21523 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21524 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
21525 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
21526
21527 /* ARM V6 not included in V7M (eg. integer SIMD). */
21528 #undef THUMB_VARIANT
21529 #define THUMB_VARIANT & arm_ext_v6_dsp
21530 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
21531 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
21532 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21533 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21534 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21535 /* Old name for QASX. */
21536 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21537 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21538 /* Old name for QSAX. */
21539 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21540 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21541 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21542 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21543 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21544 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21545 /* Old name for SASX. */
21546 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21547 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21548 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21549 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21550 /* Old name for SHASX. */
21551 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21552 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21553 /* Old name for SHSAX. */
21554 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21555 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21556 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21557 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21558 /* Old name for SSAX. */
21559 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21560 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21561 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21562 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21563 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21564 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21565 /* Old name for UASX. */
21566 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21567 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21568 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21569 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21570 /* Old name for UHASX. */
21571 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21572 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21573 /* Old name for UHSAX. */
21574 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21575 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21576 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21577 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21578 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21579 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21580 /* Old name for UQASX. */
21581 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21582 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21583 /* Old name for UQSAX. */
21584 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21585 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21586 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21587 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21588 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21589 /* Old name for USAX. */
21590 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21591 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21592 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21593 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21594 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21595 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21596 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21597 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21598 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
21599 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
21600 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
21601 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21602 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21603 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21604 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21605 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21606 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21607 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21608 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
21609 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21610 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21611 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21612 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21613 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21614 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21615 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21616 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21617 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21618 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21619 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
21620 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
21621 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
21622 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
21623 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
21624
21625 #undef ARM_VARIANT
21626 #define ARM_VARIANT & arm_ext_v6k_v6t2
21627 #undef THUMB_VARIANT
21628 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21629
21630 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
21631 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
21632 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
21633 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
21634
21635 #undef THUMB_VARIANT
21636 #define THUMB_VARIANT & arm_ext_v6_notm
21637 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
21638 ldrexd, t_ldrexd),
21639 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
21640 RRnpcb), strexd, t_strexd),
21641
21642 #undef THUMB_VARIANT
21643 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21644 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
21645 rd_rn, rd_rn),
21646 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
21647 rd_rn, rd_rn),
21648 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21649 strex, t_strexbh),
21650 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
21651 strex, t_strexbh),
21652 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
21653
21654 #undef ARM_VARIANT
21655 #define ARM_VARIANT & arm_ext_sec
21656 #undef THUMB_VARIANT
21657 #define THUMB_VARIANT & arm_ext_sec
21658
21659 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
21660
21661 #undef ARM_VARIANT
21662 #define ARM_VARIANT & arm_ext_virt
21663 #undef THUMB_VARIANT
21664 #define THUMB_VARIANT & arm_ext_virt
21665
21666 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
21667 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
21668
21669 #undef ARM_VARIANT
21670 #define ARM_VARIANT & arm_ext_pan
21671 #undef THUMB_VARIANT
21672 #define THUMB_VARIANT & arm_ext_pan
21673
21674 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
21675
21676 #undef ARM_VARIANT
21677 #define ARM_VARIANT & arm_ext_v6t2
21678 #undef THUMB_VARIANT
21679 #define THUMB_VARIANT & arm_ext_v6t2
21680
21681 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
21682 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
21683 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21684 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
21685
21686 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
21687 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
21688
21689 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21690 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21691 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21692 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
21693
21694 #undef ARM_VARIANT
21695 #define ARM_VARIANT & arm_ext_v3
21696 #undef THUMB_VARIANT
21697 #define THUMB_VARIANT & arm_ext_v6t2
21698
21699 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
21700 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
21701 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
21702
21703 #undef ARM_VARIANT
21704 #define ARM_VARIANT & arm_ext_v6t2
21705 #undef THUMB_VARIANT
21706 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21707 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
21708 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
21709
21710 /* Thumb-only instructions. */
21711 #undef ARM_VARIANT
21712 #define ARM_VARIANT NULL
21713 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
21714 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
21715
21716 /* ARM does not really have an IT instruction, so always allow it.
21717 The opcode is copied from Thumb in order to allow warnings in
21718 -mimplicit-it=[never | arm] modes. */
21719 #undef ARM_VARIANT
21720 #define ARM_VARIANT & arm_ext_v1
21721 #undef THUMB_VARIANT
21722 #define THUMB_VARIANT & arm_ext_v6t2
21723
21724 TUE("it", bf08, bf08, 1, (COND), it, t_it),
21725 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
21726 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
21727 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
21728 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
21729 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
21730 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
21731 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
21732 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
21733 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
21734 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
21735 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
21736 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
21737 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
21738 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
21739 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21740 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
21741 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
21742
21743 /* Thumb2 only instructions. */
21744 #undef ARM_VARIANT
21745 #define ARM_VARIANT NULL
21746
21747 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21748 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
21749 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
21750 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
21751 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
21752 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
21753
21754 /* Hardware division instructions. */
21755 #undef ARM_VARIANT
21756 #define ARM_VARIANT & arm_ext_adiv
21757 #undef THUMB_VARIANT
21758 #define THUMB_VARIANT & arm_ext_div
21759
21760 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
21761 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
21762
21763 /* ARM V6M/V7 instructions. */
21764 #undef ARM_VARIANT
21765 #define ARM_VARIANT & arm_ext_barrier
21766 #undef THUMB_VARIANT
21767 #define THUMB_VARIANT & arm_ext_barrier
21768
21769 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
21770 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
21771 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
21772
21773 /* ARM V7 instructions. */
21774 #undef ARM_VARIANT
21775 #define ARM_VARIANT & arm_ext_v7
21776 #undef THUMB_VARIANT
21777 #define THUMB_VARIANT & arm_ext_v7
21778
21779 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
21780 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
21781
21782 #undef ARM_VARIANT
21783 #define ARM_VARIANT & arm_ext_mp
21784 #undef THUMB_VARIANT
21785 #define THUMB_VARIANT & arm_ext_mp
21786
21787 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
21788
21789 /* AArchv8 instructions. */
21790 #undef ARM_VARIANT
21791 #define ARM_VARIANT & arm_ext_v8
21792
21793 /* Instructions shared between armv8-a and armv8-m. */
21794 #undef THUMB_VARIANT
21795 #define THUMB_VARIANT & arm_ext_atomics
21796
21797 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21798 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21799 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21800 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21801 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21802 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
21803 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21804 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
21805 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
21806 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
21807 stlex, t_stlex),
21808 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
21809 stlex, t_stlex),
21810 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
21811 stlex, t_stlex),
21812 #undef THUMB_VARIANT
21813 #define THUMB_VARIANT & arm_ext_v8
21814
21815 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
21816 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
21817 ldrexd, t_ldrexd),
21818 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
21819 strexd, t_strexd),
21820
21821 /* Defined in V8 but is in undefined encoding space for earlier
21822 architectures. However earlier architectures are required to treat
 21823 this instruction as a semihosting trap as well. Hence while not explicitly
21824 defined as such, it is in fact correct to define the instruction for all
21825 architectures. */
21826 #undef THUMB_VARIANT
21827 #define THUMB_VARIANT & arm_ext_v1
21828 #undef ARM_VARIANT
21829 #define ARM_VARIANT & arm_ext_v1
21830 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
21831
21832 /* ARMv8 T32 only. */
21833 #undef ARM_VARIANT
21834 #define ARM_VARIANT NULL
21835 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
21836 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
21837 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
21838
21839 /* FP for ARMv8. */
21840 #undef ARM_VARIANT
21841 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21842 #undef THUMB_VARIANT
21843 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21844
21845 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
21846 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
21847 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
21848 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
21849 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21850 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
21851 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
21852 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
21853 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
21854 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
21855 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
21856 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
21857 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
21858 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
21859 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
21860 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
21861 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
21862
21863 /* Crypto v1 extensions. */
21864 #undef ARM_VARIANT
21865 #define ARM_VARIANT & fpu_crypto_ext_armv8
21866 #undef THUMB_VARIANT
21867 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21868
21869 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
21870 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
21871 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
21872 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
21873 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
21874 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
21875 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
21876 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
21877 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
21878 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
21879 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
21880 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
21881 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
21882 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
21883
21884 #undef ARM_VARIANT
21885 #define ARM_VARIANT & crc_ext_armv8
21886 #undef THUMB_VARIANT
21887 #define THUMB_VARIANT & crc_ext_armv8
21888 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
21889 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
21890 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
21891 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
21892 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
21893 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
21894
21895 /* ARMv8.2 RAS extension. */
21896 #undef ARM_VARIANT
21897 #define ARM_VARIANT & arm_ext_ras
21898 #undef THUMB_VARIANT
21899 #define THUMB_VARIANT & arm_ext_ras
21900 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
21901
21902 #undef ARM_VARIANT
21903 #define ARM_VARIANT & arm_ext_v8_3
21904 #undef THUMB_VARIANT
21905 #define THUMB_VARIANT & arm_ext_v8_3
21906 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
21907 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
21908 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
21909
21910 #undef ARM_VARIANT
21911 #define ARM_VARIANT & fpu_neon_ext_dotprod
21912 #undef THUMB_VARIANT
21913 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21914 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
21915 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
21916
21917 #undef ARM_VARIANT
21918 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21919 #undef THUMB_VARIANT
21920 #define THUMB_VARIANT NULL
21921
21922 cCE("wfs", e200110, 1, (RR), rd),
21923 cCE("rfs", e300110, 1, (RR), rd),
21924 cCE("wfc", e400110, 1, (RR), rd),
21925 cCE("rfc", e500110, 1, (RR), rd),
21926
21927 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
21928 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
21929 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
21930 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
21931
21932 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
21933 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
21934 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
21935 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
21936
21937 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
21938 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
21939 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
21940 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
21941 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
21942 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
21943 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
21944 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
21945 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
21946 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
21947 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
21948 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
21949
21950 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
21951 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
21952 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
21953 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
21954 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
21955 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
21956 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
21957 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
21958 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
21959 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
21960 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
21961 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
21962
21963 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
21964 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
21965 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
21966 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
21967 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
21968 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
21969 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
21970 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
21971 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
21972 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
21973 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
21974 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
21975
21976 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
21977 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
21978 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
21979 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
21980 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
21981 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
21982 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
21983 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
21984 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
21985 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
21986 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
21987 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
21988
21989 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
21990 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
21991 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
21992 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
21993 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
21994 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
21995 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
21996 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
21997 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
21998 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
21999 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
22000 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
22001
22002 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
22003 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
22004 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
22005 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
22006 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
22007 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
22008 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
22009 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
22010 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
22011 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
22012 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
22013 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
22014
22015 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
22016 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
22017 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
22018 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
22019 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
22020 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
22021 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
22022 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
22023 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
22024 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
22025 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
22026 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
22027
22028 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
22029 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
22030 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
22031 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
22032 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
22033 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
22034 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
22035 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
22036 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
22037 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
22038 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
22039 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
22040
22041 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
22042 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
22043 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
22044 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
22045 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
22046 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
22047 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
22048 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
22049 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
22050 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
22051 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
22052 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
22053
22054 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
22055 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
22056 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
22057 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
22058 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
22059 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
22060 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
22061 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
22062 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
22063 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
22064 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
22065 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
22066
22067 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
22068 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
22069 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
22070 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
22071 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
22072 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
22073 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
22074 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
22075 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
22076 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
22077 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
22078 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
22079
22080 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
22081 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
22082 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
22083 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
22084 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
22085 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
22086 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
22087 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
22088 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
22089 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
22090 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
22091 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
22092
22093 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
22094 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
22095 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
22096 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
22097 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
22098 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
22099 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
22100 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
22101 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
22102 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
22103 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
22104 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
22105
22106 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
22107 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
22108 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
22109 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
22110 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
22111 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
22112 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
22113 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
22114 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
22115 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
22116 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
22117 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
22118
22119 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
22120 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
22121 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
22122 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
22123 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
22124 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
22125 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
22126 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
22127 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
22128 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
22129 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
22130 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
22131
22132 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
22133 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
22134 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
22135 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
22136 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
22137 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
22138 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
22139 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
22140 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
22141 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
22142 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
22143 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
22144
22145 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
22146 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
22147 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
22148 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
22149 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
22150 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22151 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22152 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22153 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
22154 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
22155 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
22156 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
22157
22158 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
22159 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
22160 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
22161 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
22162 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
22163 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22164 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22165 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22166 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
22167 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
22168 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
22169 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
22170
22171 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
22172 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
22173 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
22174 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
22175 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
22176 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22177 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22178 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22179 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
22180 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
22181 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
22182 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
22183
22184 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
22185 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
22186 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
22187 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
22188 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
22189 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22190 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22191 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22192 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
22193 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
22194 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
22195 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
22196
22197 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
22198 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
22199 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
22200 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
22201 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
22202 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22203 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22204 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22205 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
22206 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
22207 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
22208 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
22209
22210 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
22211 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
22212 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
22213 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
22214 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
22215 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22216 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22217 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22218 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
22219 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
22220 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
22221 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
22222
22223 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
22224 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
22225 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
22226 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
22227 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
22228 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22229 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22230 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22231 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
22232 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
22233 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
22234 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
22235
22236 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
22237 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
22238 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
22239 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
22240 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
22241 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22242 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22243 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22244 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
22245 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
22246 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
22247 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
22248
22249 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
22250 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
22251 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
22252 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
22253 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
22254 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22255 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22256 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22257 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
22258 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
22259 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
22260 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
22261
22262 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
22263 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
22264 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
22265 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
22266 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
22267 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22268 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22269 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22270 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
22271 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
22272 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
22273 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
22274
22275 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22276 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22277 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22278 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22279 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22280 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22281 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22282 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22283 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22284 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22285 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22286 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22287
22288 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22289 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22290 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22291 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22292 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22293 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22294 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22295 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22296 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22297 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22298 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22299 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22300
22301 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
22302 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
22303 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
22304 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
22305 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
22306 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
22307 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
22308 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
22309 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
22310 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
22311 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
22312 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
22313
22314 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
22315 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
22316 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
22317 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
22318
22319 cCL("flts", e000110, 2, (RF, RR), rn_rd),
22320 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
22321 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
22322 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
22323 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
22324 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
22325 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
22326 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
22327 cCL("flte", e080110, 2, (RF, RR), rn_rd),
22328 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
22329 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
22330 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
22331
22332 /* The implementation of the FIX instruction is broken on some
22333 assemblers, in that it accepts a precision specifier as well as a
22334 rounding specifier, despite the fact that this is meaningless.
22335 To be more compatible, we accept it as well, though of course it
22336 does not set any bits. */
22337 cCE("fix", e100110, 2, (RR, RF), rd_rm),
22338 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
22339 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
22340 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
22341 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
22342 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
22343 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
22344 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
22345 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
22346 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
22347 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
22348 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
22349 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
22350
22351 /* Instructions that were new with the real FPA, call them V2. */
22352 #undef ARM_VARIANT
22353 #define ARM_VARIANT & fpu_fpa_ext_v2
22354
22355 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22356 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22357 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22358 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22359 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22360 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
22361
22362 #undef ARM_VARIANT
22363 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22364
22365 /* Moves and type conversions. */
22366 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
22367 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
22368 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
22369 cCE("fmstat", ef1fa10, 0, (), noargs),
22370 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
22371 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
22372 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
22373 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
22374 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
22375 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22376 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
22377 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
22378 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
22379 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
22380
22381 /* Memory operations. */
22382 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22383 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
22384 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22385 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22386 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22387 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22388 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22389 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22390 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22391 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22392 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22393 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
22394 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22395 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
22396 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22397 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
22398 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22399 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
22400
22401 /* Monadic operations. */
22402 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
22403 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
22404 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
22405
22406 /* Dyadic operations. */
22407 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22408 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22409 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22410 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22411 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22412 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22413 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22414 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22415 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22416
22417 /* Comparisons. */
22418 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
22419 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
22420 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
22421 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
22422
22423 /* Double precision load/store are still present on single precision
22424 implementations. */
22425 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22426 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
22427 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22428 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22429 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22430 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22431 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22432 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
22433 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22434 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
22435
22436 #undef ARM_VARIANT
22437 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22438
22439 /* Moves and type conversions. */
22440 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22441 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22442 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22443 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
22444 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
22445 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
22446 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
22447 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
22448 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
22449 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22450 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22451 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
22452 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
22453
22454 /* Monadic operations. */
22455 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22456 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22457 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22458
22459 /* Dyadic operations. */
22460 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22461 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22462 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22463 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22464 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22465 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22466 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22467 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22468 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22469
22470 /* Comparisons. */
22471 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
22472 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
22473 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
22474 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
22475
22476 #undef ARM_VARIANT
22477 #define ARM_VARIANT & fpu_vfp_ext_v2
22478
22479 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
22480 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
22481 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
22482 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
22483
22484 /* Instructions which may belong to either the Neon or VFP instruction sets.
22485 Individual encoder functions perform additional architecture checks. */
22486 #undef ARM_VARIANT
22487 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22488 #undef THUMB_VARIANT
22489 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22490
22491 /* These mnemonics are unique to VFP. */
22492 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
22493 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
22494 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22495 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22496 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22497 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22498 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
22499 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
22500 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
22501 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
22502
22503 /* Mnemonics shared by Neon and VFP. */
22504 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
22505 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22506 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
22507
22508 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22509 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22510 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22511 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22512 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22513 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
22514
22515 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
22516 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
22517 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
22518 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
22519
22520
22521 /* NOTE: All VMOV encoding is special-cased! */
22522 NCE(vmov, 0, 1, (VMOV), neon_mov),
22523 NCE(vmovq, 0, 1, (VMOV), neon_mov),
22524
22525 #undef THUMB_VARIANT
22526 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22527 by different feature bits. Since we are setting the Thumb guard, we can
22528 require Thumb-1 which makes it a nop guard and set the right feature bit in
22529 do_vldr_vstr (). */
22530 #define THUMB_VARIANT & arm_ext_v4t
22531 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22532 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
22533
22534 #undef ARM_VARIANT
22535 #define ARM_VARIANT & arm_ext_fp16
22536 #undef THUMB_VARIANT
22537 #define THUMB_VARIANT & arm_ext_fp16
22538 /* New instructions added from v8.2, allowing the extraction and insertion of
22539 the upper 16 bits of a 32-bit vector register. */
22540 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
22541 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
22542
22543 /* New backported fma/fms instructions optional in v8.2. */
22544 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
22545 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
22546
22547 #undef THUMB_VARIANT
22548 #define THUMB_VARIANT & fpu_neon_ext_v1
22549 #undef ARM_VARIANT
22550 #define ARM_VARIANT & fpu_neon_ext_v1
22551
22552 /* Data processing with three registers of the same length. */
22553 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22554 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
22555 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
22556 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22557 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22558 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22559 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22560 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
22561 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
22562 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22563 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22564 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22565 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
22566 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
22567 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22568 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22569 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
22570 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
22571 /* If not immediate, fall back to neon_dyadic_i64_su.
22572 shl_imm should accept I8 I16 I32 I64,
22573 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22574 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
22575 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
22576 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
22577 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
22578 /* Logic ops, types optional & ignored. */
22579 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22580 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22581 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22582 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22583 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22584 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22585 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
22586 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
22587 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
22588 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
22589 /* Bitfield ops, untyped. */
22590 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22591 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22592 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22593 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22594 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
22595 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
22596 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22597 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22598 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22599 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22600 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
22601 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
22602 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22603 back to neon_dyadic_if_su. */
22604 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22605 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22606 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
22607 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
22608 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22609 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22610 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
22611 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
22612 /* Comparison. Type I8 I16 I32 F32. */
22613 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
22614 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
22615 /* As above, D registers only. */
22616 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22617 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
22618 /* Int and float variants, signedness unimportant. */
22619 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22620 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
22621 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
22622 /* Add/sub take types I8 I16 I32 I64 F32. */
22623 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22624 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
22625 /* vtst takes sizes 8, 16, 32. */
22626 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
22627 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
22628 /* VMUL takes I8 I16 I32 F32 P8. */
22629 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
22630 /* VQD{R}MULH takes S16 S32. */
22631 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22632 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22633 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
22634 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
22635 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22636 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22637 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
22638 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
22639 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22640 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22641 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
22642 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
22643 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22644 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22645 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
22646 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
22647 /* ARM v8.1 extension. */
22648 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22649 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22650 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
22651 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
22652
22653 /* Two address, int/float. Types S8 S16 S32 F32. */
22654 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
22655 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
22656
22657 /* Data processing with two registers and a shift amount. */
22658 /* Right shifts, and variants with rounding.
22659 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22660 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22661 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22662 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
22663 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
22664 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22665 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22666 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
22667 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
22668 /* Shift and insert. Sizes accepted 8 16 32 64. */
22669 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
22670 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
22671 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
22672 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
22673 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22674 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
22675 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
22676 /* Right shift immediate, saturating & narrowing, with rounding variants.
22677 Types accepted S16 S32 S64 U16 U32 U64. */
22678 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22679 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
22680 /* As above, unsigned. Types accepted S16 S32 S64. */
22681 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22682 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
22683 /* Right shift narrowing. Types accepted I16 I32 I64. */
22684 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22685 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
22686 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22687 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
22688 /* CVT with optional immediate for fixed-point variant. */
22689 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
22690
22691 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
22692 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
22693
22694 /* Data processing, three registers of different lengths. */
22695 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22696 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
22697 /* If not scalar, fall back to neon_dyadic_long.
22698 Vector types as above, scalar types S16 S32 U16 U32. */
22699 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22700 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
22701 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22702 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22703 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
22704 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22705 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22706 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22707 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22708 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
22709 /* Saturating doubling multiplies. Types S16 S32. */
22710 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22711 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22712 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
22713 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22714 S16 S32 U16 U32. */
22715 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
22716
22717 /* Extract. Size 8. */
22718 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
22719 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
22720
22721 /* Two registers, miscellaneous. */
22722 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22723 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
22724 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
22725 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
22726 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
22727 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
22728 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
22729 /* Vector replicate. Sizes 8 16 32. */
22730 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
22731 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
22732 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22733 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
22734 /* VMOVN. Types I16 I32 I64. */
22735 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
22736 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22737 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
22738 /* VQMOVUN. Types S16 S32 S64. */
22739 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
22740 /* VZIP / VUZP. Sizes 8 16 32. */
22741 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
22742 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
22743 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
22744 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
22745 /* VQABS / VQNEG. Types S8 S16 S32. */
22746 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22747 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
22748 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
22749 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
22750 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22751 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
22752 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
22753 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
22754 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
22755 /* Reciprocal estimates. Types U32 F16 F32. */
22756 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
22757 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
22758 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
22759 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
22760 /* VCLS. Types S8 S16 S32. */
22761 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
22762 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
22763 /* VCLZ. Types I8 I16 I32. */
22764 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
22765 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
22766 /* VCNT. Size 8. */
22767 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
22768 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
22769 /* Two address, untyped. */
22770 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
22771 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
22772 /* VTRN. Sizes 8 16 32. */
22773 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
22774 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
22775
22776 /* Table lookup. Size 8. */
22777 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22778 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
22779
22780 #undef THUMB_VARIANT
22781 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22782 #undef ARM_VARIANT
22783 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22784
22785 /* Neon element/structure load/store. */
22786 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22787 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
22788 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22789 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
22790 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22791 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
22792 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22793 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
22794
22795 #undef THUMB_VARIANT
22796 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22797 #undef ARM_VARIANT
22798 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22799 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
22800 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22801 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22802 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22803 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22804 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22805 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22806 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
22807 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
22808
22809 #undef THUMB_VARIANT
22810 #define THUMB_VARIANT & fpu_vfp_ext_v3
22811 #undef ARM_VARIANT
22812 #define ARM_VARIANT & fpu_vfp_ext_v3
22813
22814 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
22815 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22816 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22817 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22818 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22819 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22820 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22821 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
22822 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
22823
22824 #undef ARM_VARIANT
22825 #define ARM_VARIANT & fpu_vfp_ext_fma
22826 #undef THUMB_VARIANT
22827 #define THUMB_VARIANT & fpu_vfp_ext_fma
22828 /* Mnemonics shared by Neon and VFP. These are included in the
22829 VFP FMA variant; NEON and VFP FMA always includes the NEON
22830 FMA instructions. */
22831 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22832 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
22833 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22834 the v form should always be used. */
22835 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22836 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
22837 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22838 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
22839 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22840 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
22841
22842 #undef THUMB_VARIANT
22843 #undef ARM_VARIANT
22844 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22845
22846 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22847 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22848 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22849 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22850 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22851 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
22852 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
22853 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
22854
22855 #undef ARM_VARIANT
22856 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22857
22858 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
22859 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
22860 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
22861 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
22862 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
22863 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
22864 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
22865 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
22866 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
22867 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22868 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22869 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
22870 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22871 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22872 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
22873 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22874 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22875 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
22876 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
22877 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
22878 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22879 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22880 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22881 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22882 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22883 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
22884 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
22885 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
22886 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
22887 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
22888 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
22889 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
22890 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
22891 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
22892 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
22893 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
22894 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
22895 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22896 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22897 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22898 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22899 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22900 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22901 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22902 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22903 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22904 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
22905 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22906 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22907 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22908 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22909 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22910 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22911 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22912 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22913 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22914 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22915 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22916 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22917 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22918 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22919 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22920 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22921 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22922 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22923 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22924 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22925 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22926 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22927 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22928 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22929 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22930 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22931 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22932 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22933 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22934 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22935 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22936 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22937 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22938 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22939 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22940 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22941 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22942 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22943 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22944 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22945 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22946 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
22947 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22948 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22949 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22950 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22951 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22952 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22953 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22954 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22955 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22956 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22957 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22958 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22959 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22960 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22961 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22962 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22963 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22964 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22965 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22966 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22967 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22968 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
22969 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22970 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22971 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22972 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22973 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22974 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22975 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22976 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22977 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22978 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22979 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22980 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22981 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22982 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22983 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22984 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22985 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
22986 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
22987 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22988 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
22989 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
22990 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
22991 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22992 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22993 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22994 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22995 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22996 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22997 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22998 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
22999 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23000 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
23001 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
23002 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
23003 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
23004 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
23005 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
23006 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23007 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23008 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23009 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
23010 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
23011 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
23012 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
23013 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
23014 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
23015 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23016 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23017 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23018 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23019 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
23020
23021 #undef ARM_VARIANT
23022 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
23023
23024 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
23025 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
23026 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
23027 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
23028 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
23029 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
23030 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23031 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23032 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23033 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23034 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23035 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23036 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23037 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23038 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23039 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23040 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23041 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23042 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23043 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23044 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
23045 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23046 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23047 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23048 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23049 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23050 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23051 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23052 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23053 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23054 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23055 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23056 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23057 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23058 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23059 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23060 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23061 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23062 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23063 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23064 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23065 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23066 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23067 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23068 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23069 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23070 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23071 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23072 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23073 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23074 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23075 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23076 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23077 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23078 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23079 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23080 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
23081
23082 #undef ARM_VARIANT
23083 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
23084
23085 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
23086 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
23087 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
23088 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
23089 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
23090 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
23091 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
23092 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
23093 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
23094 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
23095 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
23096 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
23097 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
23098 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
23099 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
23100 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
23101 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
23102 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
23103 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
23104 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
23105 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
23106 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
23107 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
23108 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
23109 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
23110 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
23111 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
23112 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
23113 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
23114 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
23115 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
23116 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
23117 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
23118 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
23119 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
23120 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
23121 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
23122 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
23123 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
23124 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
23125 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
23126 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
23127 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
23128 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
23129 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
23130 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
23131 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
23132 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
23133 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
23134 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
23135 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
23136 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
23137 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
23138 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
23139 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
23140 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
23141 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
23142 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
23143 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
23144 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
23145 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
23146 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
23147 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
23148 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
23149 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23150 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23151 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23152 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23153 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23154 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
23155 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23156 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
23157 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
23158 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
23159 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
23160 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
23161
23162 /* ARMv8.5-A instructions. */
23163 #undef ARM_VARIANT
23164 #define ARM_VARIANT & arm_ext_sb
23165 #undef THUMB_VARIANT
23166 #define THUMB_VARIANT & arm_ext_sb
23167 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
23168
23169 #undef ARM_VARIANT
23170 #define ARM_VARIANT & arm_ext_predres
23171 #undef THUMB_VARIANT
23172 #define THUMB_VARIANT & arm_ext_predres
23173 CE("cfprctx", e070f93, 1, (RRnpc), rd),
23174 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
23175 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
23176
23177 /* ARMv8-M instructions. */
23178 #undef ARM_VARIANT
23179 #define ARM_VARIANT NULL
23180 #undef THUMB_VARIANT
23181 #define THUMB_VARIANT & arm_ext_v8m
23182 ToU("sg", e97fe97f, 0, (), noargs),
23183 ToC("blxns", 4784, 1, (RRnpc), t_blx),
23184 ToC("bxns", 4704, 1, (RRnpc), t_bx),
23185 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
23186 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
23187 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
23188 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
23189
23190 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
23191 instructions behave as nop if no VFP is present. */
23192 #undef THUMB_VARIANT
23193 #define THUMB_VARIANT & arm_ext_v8m_main
23194 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
23195 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
23196
23197 /* Armv8.1-M Mainline instructions. */
23198 #undef THUMB_VARIANT
23199 #define THUMB_VARIANT & arm_ext_v8_1m_main
23200 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
23201 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
23202 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
23203 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
23204 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
23205
23206 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
23207 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
23208 toU("le", _le, 2, (oLR, EXP), t_loloop),
23209
23210 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
23211 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
23212
23213 #undef THUMB_VARIANT
23214 #define THUMB_VARIANT & mve_ext
23215 ToC("vpst", fe710f4d, 0, (), mve_vpt),
23216 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
23217 ToC("vpste", fe718f4d, 0, (), mve_vpt),
23218 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
23219 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
23220 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
23221 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
23222 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
23223 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
23224 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
23225 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
23226 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
23227 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
23228 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
23229 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
23230
23231 /* MVE and MVE FP only. */
23232 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
23233 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
23234 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23235 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23236 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
23237 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
23238 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23239 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23240 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
23241 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
23242 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
23243 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
23244
23245 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23246 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23247 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23248 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23249 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23250 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23251 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23252 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
23253 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23254 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23255 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23256 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
23257 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23258 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23259 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23260 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23261 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23262 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23263 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23264 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
23265
23266 #undef ARM_VARIANT
23267 #define ARM_VARIANT & fpu_vfp_ext_v1xd
23268 #undef THUMB_VARIANT
23269 #define THUMB_VARIANT & arm_ext_v6t2
23270
23271 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
23272 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
23273 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
23274
23275 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
23276 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
23277
23278 #undef ARM_VARIANT
23279 #define ARM_VARIANT & fpu_neon_ext_v1
23280 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
23281 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
23282 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
23283 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
23284 };
23285 #undef ARM_VARIANT
23286 #undef THUMB_VARIANT
23287 #undef TCE
23288 #undef TUE
23289 #undef TUF
23290 #undef TCC
23291 #undef cCE
23292 #undef cCL
23293 #undef C3E
23294 #undef C3
23295 #undef CE
23296 #undef CM
23297 #undef CL
23298 #undef UE
23299 #undef UF
23300 #undef UT
23301 #undef NUF
23302 #undef nUF
23303 #undef NCE
23304 #undef nCE
23305 #undef OPS0
23306 #undef OPS1
23307 #undef OPS2
23308 #undef OPS3
23309 #undef OPS4
23310 #undef OPS5
23311 #undef OPS6
23312 #undef do_0
23313 #undef ToC
23314 #undef toC
23315 #undef ToU
23316 #undef toU
23317 \f
23318 /* MD interface: bits in the object file. */
23319
23320 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23321 for use in the a.out file, and stores them in the array pointed to by buf.
23322 This knows about the endian-ness of the target machine and does
23323 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23324 2 (short) and 4 (long) Floating numbers are put out as a series of
23325 LITTLENUMS (shorts, here at least). */
23326
23327 void
23328 md_number_to_chars (char * buf, valueT val, int n)
23329 {
23330 if (target_big_endian)
23331 number_to_chars_bigendian (buf, val, n);
23332 else
23333 number_to_chars_littleendian (buf, val, n);
23334 }
23335
23336 static valueT
23337 md_chars_to_number (char * buf, int n)
23338 {
23339 valueT result = 0;
23340 unsigned char * where = (unsigned char *) buf;
23341
23342 if (target_big_endian)
23343 {
23344 while (n--)
23345 {
23346 result <<= 8;
23347 result |= (*where++ & 255);
23348 }
23349 }
23350 else
23351 {
23352 while (n--)
23353 {
23354 result <<= 8;
23355 result |= (where[n] & 255);
23356 }
23357 }
23358
23359 return result;
23360 }
23361
23362 /* MD interface: Sections. */
23363
23364 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23365 that an rs_machine_dependent frag may reach. */
23366
23367 unsigned int
23368 arm_frag_max_var (fragS *fragp)
23369 {
23370 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23371 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23372
23373 Note that we generate relaxable instructions even for cases that don't
23374 really need it, like an immediate that's a trivial constant. So we're
23375 overestimating the instruction size for some of those cases. Rather
23376 than putting more intelligence here, it would probably be better to
23377 avoid generating a relaxation frag in the first place when it can be
23378 determined up front that a short instruction will suffice. */
23379
23380 gas_assert (fragp->fr_type == rs_machine_dependent);
23381 return INSN_SIZE;
23382 }
23383
23384 /* Estimate the size of a frag before relaxing. Assume everything fits in
23385 2 bytes. */
23386
23387 int
23388 md_estimate_size_before_relax (fragS * fragp,
23389 segT segtype ATTRIBUTE_UNUSED)
23390 {
23391 fragp->fr_var = 2;
23392 return 2;
23393 }
23394
23395 /* Convert a machine dependent frag. */
23396
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The (16-bit) instruction to be converted sits at the end of the
     fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup from the frag's symbol/offset.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* opcode recorded at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* Loads and stores: fr_var == 4 means relaxation chose the
	 32-bit encoding; rebuild the wide instruction, moving the
	 register fields from the old 16-bit encoding into place.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* SP/PC-relative form: Rt is in bits 8-10.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Register-offset form: Rt in bits 0-2, Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the pseudo "ldr Rt, =imm / label" form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move Rd from bits 4-7 of the old adr into bits 8-11.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr adds to Align(PC,4); compensate here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs keep Rd at bit 8; cmp/cmn put Rn at bit 16, so
	     shift the old register field by 8 more.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: wide form reaches +/-16MB.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      /* Conditional branch: the condition moves from bits 8-11 of the
	 narrow encoding to bits 22-25 of the wide one.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the old encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S bit; it selects the relocation variant.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Record the fixup so write.c can apply/emit the relocation.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
23568
23569 /* Return the size of a relaxable immediate operand instruction.
23570 SHIFT and SIZE specify the form of the allowable immediate. */
23571 static int
23572 relax_immediate (fragS *fragp, int size, int shift)
23573 {
23574 offsetT offset;
23575 offsetT mask;
23576 offsetT low;
23577
23578 /* ??? Should be able to do better than this. */
23579 if (fragp->fr_symbol)
23580 return 4;
23581
23582 low = (1 << shift) - 1;
23583 mask = (1 << (shift + size)) - (1 << shift);
23584 offset = fragp->fr_offset;
23585 /* Force misaligned offsets to 32-bit variant. */
23586 if (offset & low)
23587 return 4;
23588 if (offset & ~mask)
23589 return 4;
23590 return 2;
23591 }
23592
23593 /* Get the address of a symbol during relaxation. */
/* Get the address of a symbol during relaxation.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  /* An absolute symbol must live in the dummy zero-address frag.  */
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* relax_marker differing means SYM_FRAG has not been relaxed on
     this pass yet, i.e. it lies after FRAGP in the chain.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated stretch towards zero to the
		 alignment boundary the frag enforces: padding absorbs
		 part of the movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed; the symbol does not move at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the adjustment if the symbol's frag really does
	 follow FRAGP (the walk found it before hitting the end).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
23642
23643 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23644 load. */
23645 static int
23646 relax_adr (fragS *fragp, asection *sec, long stretch)
23647 {
23648 addressT addr;
23649 offsetT val;
23650
23651 /* Assume worst case for symbols not known to be in the same section. */
23652 if (fragp->fr_symbol == NULL
23653 || !S_IS_DEFINED (fragp->fr_symbol)
23654 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23655 || S_IS_WEAK (fragp->fr_symbol))
23656 return 4;
23657
23658 val = relaxed_symbol_addr (fragp, stretch);
23659 addr = fragp->fr_address + fragp->fr_fix;
23660 addr = (addr + 4) & ~3;
23661 /* Force misaligned targets to 32-bit variant. */
23662 if (val & 3)
23663 return 4;
23664 val -= addr;
23665 if (val < 0 || val > 1020)
23666 return 4;
23667 return 2;
23668 }
23669
23670 /* Return the size of a relaxable add/sub immediate instruction. */
23671 static int
23672 relax_addsub (fragS *fragp, asection *sec)
23673 {
23674 char *buf;
23675 int op;
23676
23677 buf = fragp->fr_literal + fragp->fr_fix;
23678 op = bfd_get_16(sec->owner, buf);
23679 if ((op & 0xf) == ((op >> 4) & 0xf))
23680 return relax_immediate (fragp, 8, 0);
23681 else
23682 return relax_immediate (fragp, 3, 0);
23683 }
23684
23685 /* Return TRUE iff the definition of symbol S could be pre-empted
23686 (overridden) at link or load time. */
23687 static bfd_boolean
23688 symbol_preemptible (symbolS *s)
23689 {
23690 /* Weak symbols can always be pre-empted. */
23691 if (S_IS_WEAK (s))
23692 return TRUE;
23693
23694 /* Non-global symbols cannot be pre-empted. */
23695 if (! S_IS_EXTERNAL (s))
23696 return FALSE;
23697
23698 #ifdef OBJ_ELF
23699 /* In ELF, a global symbol can be marked protected, or private. In that
23700 case it can't be pre-empted (other definitions in the same link unit
23701 would violate the ODR). */
23702 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
23703 return FALSE;
23704 #endif
23705
23706 /* Other global symbols might be pre-empted. */
23707 return TRUE;
23708 }
23709
23710 /* Return the size of a relaxable branch instruction. BITS is the
23711 size of the offset field in the narrow instruction. */
23712
23713 static int
23714 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
23715 {
23716 addressT addr;
23717 offsetT val;
23718 offsetT limit;
23719
23720 /* Assume worst case for symbols not known to be in the same section. */
23721 if (!S_IS_DEFINED (fragp->fr_symbol)
23722 || sec != S_GET_SEGMENT (fragp->fr_symbol)
23723 || S_IS_WEAK (fragp->fr_symbol))
23724 return 4;
23725
23726 #ifdef OBJ_ELF
23727 /* A branch to a function in ARM state will require interworking. */
23728 if (S_IS_DEFINED (fragp->fr_symbol)
23729 && ARM_IS_FUNC (fragp->fr_symbol))
23730 return 4;
23731 #endif
23732
23733 if (symbol_preemptible (fragp->fr_symbol))
23734 return 4;
23735
23736 val = relaxed_symbol_addr (fragp, stretch);
23737 addr = fragp->fr_address + fragp->fr_fix + 4;
23738 val -= addr;
23739
23740 /* Offset is a signed value *2 */
23741 limit = 1 << bits;
23742 if (val >= limit || val < -limit)
23743 return 4;
23744 return 2;
23745 }
23746
23747
23748 /* Relax a machine dependent frag. This returns the amount by which
23749 the current size of the frag should change. */
23750
23751 int
23752 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
23753 {
23754 int oldsize;
23755 int newsize;
23756
23757 oldsize = fragp->fr_var;
23758 switch (fragp->fr_subtype)
23759 {
23760 case T_MNEM_ldr_pc2:
23761 newsize = relax_adr (fragp, sec, stretch);
23762 break;
23763 case T_MNEM_ldr_pc:
23764 case T_MNEM_ldr_sp:
23765 case T_MNEM_str_sp:
23766 newsize = relax_immediate (fragp, 8, 2);
23767 break;
23768 case T_MNEM_ldr:
23769 case T_MNEM_str:
23770 newsize = relax_immediate (fragp, 5, 2);
23771 break;
23772 case T_MNEM_ldrh:
23773 case T_MNEM_strh:
23774 newsize = relax_immediate (fragp, 5, 1);
23775 break;
23776 case T_MNEM_ldrb:
23777 case T_MNEM_strb:
23778 newsize = relax_immediate (fragp, 5, 0);
23779 break;
23780 case T_MNEM_adr:
23781 newsize = relax_adr (fragp, sec, stretch);
23782 break;
23783 case T_MNEM_mov:
23784 case T_MNEM_movs:
23785 case T_MNEM_cmp:
23786 case T_MNEM_cmn:
23787 newsize = relax_immediate (fragp, 8, 0);
23788 break;
23789 case T_MNEM_b:
23790 newsize = relax_branch (fragp, sec, 11, stretch);
23791 break;
23792 case T_MNEM_bcond:
23793 newsize = relax_branch (fragp, sec, 8, stretch);
23794 break;
23795 case T_MNEM_add_sp:
23796 case T_MNEM_add_pc:
23797 newsize = relax_immediate (fragp, 8, 2);
23798 break;
23799 case T_MNEM_inc_sp:
23800 case T_MNEM_dec_sp:
23801 newsize = relax_immediate (fragp, 7, 2);
23802 break;
23803 case T_MNEM_addi:
23804 case T_MNEM_addis:
23805 case T_MNEM_subi:
23806 case T_MNEM_subis:
23807 newsize = relax_addsub (fragp, sec);
23808 break;
23809 default:
23810 abort ();
23811 }
23812
23813 fragp->fr_var = newsize;
23814 /* Freeze wide instructions that are at or before the same location as
23815 in the previous pass. This avoids infinite loops.
23816 Don't freeze them unconditionally because targets may be artificially
23817 misaligned by the expansion of preceding frags. */
23818 if (stretch <= 0 && newsize > 2)
23819 {
23820 md_convert_frag (sec->owner, sec, fragp);
23821 frag_wane (fragp);
23822 }
23823
23824 return newsize - oldsize;
23825 }
23826
23827 /* Round up a section size to the appropriate boundary. */
23828
23829 valueT
23830 md_section_align (segT segment ATTRIBUTE_UNUSED,
23831 valueT size)
23832 {
23833 return size;
23834 }
23835
23836 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23837 of an rs_align_code fragment. */
23838
void
arm_handle_align (fragS * fragP)
{
  /* ARM nop encodings, [v6k?][big-endian?]: mov r0, r0 for pre-v6k,
     the architected NOP for v6k and later.  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* 16-bit Thumb nops, [thumb2?][big-endian?].  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 nop.w, [big-endian?].  */
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Pick the nop pattern matching the frag's recorded ARM/Thumb mode.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: pad mostly with wide nops, using at most one
	     narrow nop to fix up a 2-byte remainder (see below).  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Bytes that cannot form a whole nop are zero-filled first (and on
     ELF marked as data via a mapping symbol).  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
23955
23956 /* Called from md_do_align. Used to create an alignment
23957 frag in a code section. */
23958
23959 void
23960 arm_frag_align_code (int n, int max)
23961 {
23962 char * p;
23963
23964 /* We assume that there will never be a requirement
23965 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23966 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
23967 {
23968 char err_msg[128];
23969
23970 sprintf (err_msg,
23971 _("alignments greater than %d bytes not supported in .text sections."),
23972 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
23973 as_fatal ("%s", err_msg);
23974 }
23975
23976 p = frag_var (rs_align_code,
23977 MAX_MEM_FOR_RS_ALIGN_CODE,
23978 1,
23979 (relax_substateT) max,
23980 (symbolS *) NULL,
23981 (offsetT) n,
23982 (char *) NULL);
23983 *p = 0;
23984 }
23985
23986 /* Perform target specific initialisation of a frag.
23987 Note - despite the name this initialisation is not done when the frag
23988 is created, but only when its type is assigned. A frag can be created
23989 and used a long time before its type is set, so beware of assuming that
23990 this initialisation is performed first. */
23991
23992 #ifndef OBJ_ELF
23993 void
23994 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
23995 {
23996 /* Record whether this frag is in an ARM or a THUMB area. */
23997 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
23998 }
23999
24000 #else /* OBJ_ELF is defined. */
24001 void
24002 arm_init_frag (fragS * fragP, int max_chars)
24003 {
24004 bfd_boolean frag_thumb_mode;
24005
24006 /* If the current ARM vs THUMB mode has not already
24007 been recorded into this frag then do so now. */
24008 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
24009 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
24010
24011 /* PR 21809: Do not set a mapping state for debug sections
24012 - it just confuses other tools. */
24013 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
24014 return;
24015
24016 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
24017
24018 /* Record a mapping symbol for alignment frags. We will delete this
24019 later if the alignment ends up empty. */
24020 switch (fragP->fr_type)
24021 {
24022 case rs_align:
24023 case rs_align_test:
24024 case rs_fill:
24025 mapping_state_2 (MAP_DATA, max_chars);
24026 break;
24027 case rs_align_code:
24028 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
24029 break;
24030 default:
24031 break;
24032 }
24033 }
24034
24035 /* When we change sections we need to issue a new mapping symbol. */
24036
24037 void
24038 arm_elf_change_section (void)
24039 {
24040 /* Link an unlinked unwind index table section to the .text section. */
24041 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
24042 && elf_linked_to_section (now_seg) == NULL)
24043 elf_linked_to_section (now_seg) = text_section;
24044 }
24045
24046 int
24047 arm_elf_section_type (const char * str, size_t len)
24048 {
24049 if (len == 5 && strncmp (str, "exidx", 5) == 0)
24050 return SHT_ARM_EXIDX;
24051
24052 return -1;
24053 }
24054 \f
24055 /* Code to deal with unwinding tables. */
24056
24057 static void add_unwind_adjustsp (offsetT);
24058
24059 /* Generate any deferred unwind frame offset. */
24060
24061 static void
24062 flush_pending_unwind (void)
24063 {
24064 offsetT offset;
24065
24066 offset = unwind.pending_offset;
24067 unwind.pending_offset = 0;
24068 if (offset != 0)
24069 add_unwind_adjustsp (offset);
24070 }
24071
24072 /* Add an opcode to this list for this function. Two-byte opcodes should
24073 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
24074 order. */
24075
24076 static void
24077 add_unwind_opcode (valueT op, int length)
24078 {
24079 /* Add any deferred stack adjustment. */
24080 if (unwind.pending_offset)
24081 flush_pending_unwind ();
24082
24083 unwind.sp_restored = 0;
24084
24085 if (unwind.opcode_count + length > unwind.opcode_alloc)
24086 {
24087 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
24088 if (unwind.opcodes)
24089 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
24090 unwind.opcode_alloc);
24091 else
24092 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
24093 }
24094 while (length > 0)
24095 {
24096 length--;
24097 unwind.opcodes[unwind.opcode_count] = op & 0xff;
24098 op >>= 8;
24099 unwind.opcode_count++;
24100 }
24101 }
24102
24103 /* Add unwind opcodes to adjust the stack pointer. */
24104
/* Add unwind opcodes to adjust the stack pointer.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero-valued uleb128 still needs one byte.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Set the continuation bit on all but the last group.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes go in first (reversed), then
	 the 0xb2 opcode byte, since the list is built in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  Encodes (offset - 4) / 4 in the low bits.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f opcodes, each covering 0x100
	 bytes, then one final opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
24164
24165 /* Finish the list of unwind opcodes for this function. */
24166
24167 static void
24168 finish_unwind_opcodes (void)
24169 {
24170 valueT op;
24171
24172 if (unwind.fp_used)
24173 {
24174 /* Adjust sp as necessary. */
24175 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
24176 flush_pending_unwind ();
24177
24178 /* After restoring sp from the frame pointer. */
24179 op = 0x90 | unwind.fp_reg;
24180 add_unwind_opcode (op, 1);
24181 }
24182 else
24183 flush_pending_unwind ();
24184 }
24185
24186
24187 /* Start an exception table entry. If idx is nonzero this is an index table
24188 entry. */
24189
24190 static void
24191 start_unwind_section (const segT text_seg, int idx)
24192 {
24193 const char * text_name;
24194 const char * prefix;
24195 const char * prefix_once;
24196 const char * group_name;
24197 char * sec_name;
24198 int type;
24199 int flags;
24200 int linkonce;
24201
24202 if (idx)
24203 {
24204 prefix = ELF_STRING_ARM_unwind;
24205 prefix_once = ELF_STRING_ARM_unwind_once;
24206 type = SHT_ARM_EXIDX;
24207 }
24208 else
24209 {
24210 prefix = ELF_STRING_ARM_unwind_info;
24211 prefix_once = ELF_STRING_ARM_unwind_info_once;
24212 type = SHT_PROGBITS;
24213 }
24214
24215 text_name = segment_name (text_seg);
24216 if (streq (text_name, ".text"))
24217 text_name = "";
24218
24219 if (strncmp (text_name, ".gnu.linkonce.t.",
24220 strlen (".gnu.linkonce.t.")) == 0)
24221 {
24222 prefix = prefix_once;
24223 text_name += strlen (".gnu.linkonce.t.");
24224 }
24225
24226 sec_name = concat (prefix, text_name, (char *) NULL);
24227
24228 flags = SHF_ALLOC;
24229 linkonce = 0;
24230 group_name = 0;
24231
24232 /* Handle COMDAT group. */
24233 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
24234 {
24235 group_name = elf_group_name (text_seg);
24236 if (group_name == NULL)
24237 {
24238 as_bad (_("Group section `%s' has no group signature"),
24239 segment_name (text_seg));
24240 ignore_rest_of_line ();
24241 return;
24242 }
24243 flags |= SHF_GROUP;
24244 linkonce = 1;
24245 }
24246
24247 obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
24248 linkonce, 0);
24249
24250 /* Set the section link for index tables. */
24251 if (idx)
24252 elf_linked_to_section (now_seg) = text_seg;
24253 }
24254
24255
24256 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
24257 personality routine data. Returns zero, or the index table value for
24258 an inline entry. */
24259
static valueT
create_unwind_entry (int have_data)
{
  /* Size of the opcode data, first in bytes then in words.  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a .cantunwind frame; no table
	 entry is emitted for it, only the special index value.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.
	 Routine 0 can hold at most 3 opcode bytes inline, so fall back
	 to routine 1 for longer sequences.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: pack up to
		 three opcodes (MSB first) below the 0x80 marker byte.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words; the byte count per
     extra word must fit in 8 bits.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
24424
24425
24426 /* Initialize the DWARF-2 unwind information for this procedure. */
24427
void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
24433 #endif /* OBJ_ELF */
24434
24435 /* Convert REGNAME to a DWARF-2 register number. */
24436
24437 int
24438 tc_arm_regname_to_dw2regnum (char *regname)
24439 {
24440 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
24441 if (reg != FAIL)
24442 return reg;
24443
24444 /* PR 16694: Allow VFP registers as well. */
24445 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
24446 if (reg != FAIL)
24447 return 64 + reg;
24448
24449 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
24450 if (reg != FAIL)
24451 return reg + 256;
24452
24453 return FAIL;
24454 }
24455
24456 #ifdef TE_PE
24457 void
24458 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
24459 {
24460 expressionS exp;
24461
24462 exp.X_op = O_secrel;
24463 exp.X_add_symbol = symbol;
24464 exp.X_add_number = 0;
24465 emit_expr (&exp, size);
24466 }
24467 #endif
24468
24469 /* MD interface: Symbol and relocation handling. */
24470
24471 /* Return the address within the segment that a PC-relative fixup is
24472 relative to. For ARM, PC-relative fixups applied to instructions
24473 are generally relative to the location of the fixup plus 8 bytes.
24474 Thumb branches are offset by 4, and Thumb loads relative to PC
24475 require special handling. */
24476
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): when the target is a locally-defined ARM-state
	 function and the CPU has v5T, the real fixup address is restored
	 even if a relocation will be emitted — presumably because the
	 branch may be converted to BLX later; confirm against the
	 conversion code before relying on this.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* As for BLX above, but the target must be a Thumb function for
	 the un-biased base to be restored.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
24604
/* When TRUE (the default) warn about symbol definitions that shadow the
   name of an ARM instruction; see arm_tc_equal_in_insn.  Presumably
   toggled by a -m[no-]warn-syms command line option — TODO confirm.  */
static bfd_boolean flag_warn_syms = TRUE;
24606
24607 bfd_boolean
24608 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
24609 {
24610 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24611 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24612 does mean that the resulting code might be very confusing to the reader.
24613 Also this warning can be triggered if the user omits an operand before
24614 an immediate address, eg:
24615
24616 LDR =foo
24617
24618 GAS treats this as an assignment of the value of the symbol foo to a
24619 symbol LDR, and so (without this code) it will not issue any kind of
24620 warning or error message.
24621
24622 Note - ARM instructions are case-insensitive but the strings in the hash
24623 table are all stored in lower case, so we must first ensure that name is
24624 lower case too. */
24625 if (flag_warn_syms && arm_ops_hsh)
24626 {
24627 char * nbuf = strdup (name);
24628 char * p;
24629
24630 for (p = nbuf; *p; p++)
24631 *p = TOLOWER (*p);
24632 if (hash_find (arm_ops_hsh, nbuf) != NULL)
24633 {
24634 static struct hash_control * already_warned = NULL;
24635
24636 if (already_warned == NULL)
24637 already_warned = hash_new ();
24638 /* Only warn about the symbol once. To keep the code
24639 simple we let hash_insert do the lookup for us. */
24640 if (hash_insert (already_warned, nbuf, NULL) == NULL)
24641 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
24642 }
24643 else
24644 free (nbuf);
24645 }
24646
24647 return FALSE;
24648 }
24649
24650 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24651 Otherwise we have no need to default values of symbols. */
24652
24653 symbolS *
24654 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
24655 {
24656 #ifdef OBJ_ELF
24657 if (name[0] == '_' && name[1] == 'G'
24658 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
24659 {
24660 if (!GOT_symbol)
24661 {
24662 if (symbol_find (name))
24663 as_bad (_("GOT already in the symbol table"));
24664
24665 GOT_symbol = symbol_new (name, undefined_section,
24666 (valueT) 0, & zero_address_frag);
24667 }
24668
24669 return GOT_symbol;
24670 }
24671 #endif
24672
24673 return NULL;
24674 }
24675
24676 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24677 computed as two separate immediate values, added together. We
24678 already know that this value cannot be computed by just one ARM
24679 instruction. */
24680
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation until the low byte of the rotated value is
     non-zero; that byte becomes the low half of the split.  Note that
     since I is even, (i << 7) places i/2 in the 4-bit rotation field at
     bits 8-11 of an ARM modified-immediate operand.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The remainder must itself fit in one byte, rotated a
	       further 8 bits (i.e. i + 24 halved into the field).  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* A is non-zero and its low three bytes are clear, so the
	       top byte must hold the remainder.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Return the low half; *HIGHPART already holds the high half.
	   Both are in modified-immediate (value | rotation) form.  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
24714
24715 static int
24716 validate_offset_imm (unsigned int val, int hwse)
24717 {
24718 if ((hwse && val > 255) || val > 4095)
24719 return FAIL;
24720 return val;
24721 }
24722
24723 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24724 negative immediate constant by altering the instruction. A bit of
24725 a hack really.
24726 MOV <-> MVN
24727 AND <-> BIC
24728 ADC <-> SBC
24729 by inverting the second operand, and
24730 ADD <-> SUB
24731 CMP <-> CMN
24732 by negating the second operand. */
24733
24734 static int
24735 negate_data_op (unsigned long * instruction,
24736 unsigned long value)
24737 {
24738 int op, new_inst;
24739 unsigned long negated, inverted;
24740
24741 negated = encode_arm_immediate (-value);
24742 inverted = encode_arm_immediate (~value);
24743
24744 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
24745 switch (op)
24746 {
24747 /* First negates. */
24748 case OPCODE_SUB: /* ADD <-> SUB */
24749 new_inst = OPCODE_ADD;
24750 value = negated;
24751 break;
24752
24753 case OPCODE_ADD:
24754 new_inst = OPCODE_SUB;
24755 value = negated;
24756 break;
24757
24758 case OPCODE_CMP: /* CMP <-> CMN */
24759 new_inst = OPCODE_CMN;
24760 value = negated;
24761 break;
24762
24763 case OPCODE_CMN:
24764 new_inst = OPCODE_CMP;
24765 value = negated;
24766 break;
24767
24768 /* Now Inverted ops. */
24769 case OPCODE_MOV: /* MOV <-> MVN */
24770 new_inst = OPCODE_MVN;
24771 value = inverted;
24772 break;
24773
24774 case OPCODE_MVN:
24775 new_inst = OPCODE_MOV;
24776 value = inverted;
24777 break;
24778
24779 case OPCODE_AND: /* AND <-> BIC */
24780 new_inst = OPCODE_BIC;
24781 value = inverted;
24782 break;
24783
24784 case OPCODE_BIC:
24785 new_inst = OPCODE_AND;
24786 value = inverted;
24787 break;
24788
24789 case OPCODE_ADC: /* ADC <-> SBC */
24790 new_inst = OPCODE_SBC;
24791 value = inverted;
24792 break;
24793
24794 case OPCODE_SBC:
24795 new_inst = OPCODE_ADC;
24796 value = inverted;
24797 break;
24798
24799 /* We cannot do anything. */
24800 default:
24801 return FAIL;
24802 }
24803
24804 if (value == (unsigned) FAIL)
24805 return FAIL;
24806
24807 *instruction &= OPCODE_MASK;
24808 *instruction |= new_inst << DATA_OP_SHIFT;
24809 return value;
24810 }
24811
24812 /* Like negate_data_op, but for Thumb-2. */
24813
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  /* Destination register field; Rd == 15 distinguishes the flag-setting
     forms (TST/CMP-like) from the ordinary ones below.  */
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 makes this encoding TST, which cannot be rewritten, so
	 force failure.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The transformed constant may itself be unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch in the replacement opcode and return the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
24887
24888 /* Read a 32-bit thumb instruction from buf. */
24889
24890 static unsigned long
24891 get_thumb32_insn (char * buf)
24892 {
24893 unsigned long insn;
24894 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
24895 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24896
24897 return insn;
24898 }
24899
24900 /* We usually want to set the low bit on the address of thumb function
24901 symbols. In particular .word foo - . should have the low bit set.
24902 Generic code tries to fold the difference of two symbols to
24903 a constant. Prevent this and force a relocation when the first symbols
24904 is a thumb function. */
24905
24906 bfd_boolean
24907 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
24908 {
24909 if (op == O_subtract
24910 && l->X_op == O_symbol
24911 && r->X_op == O_symbol
24912 && THUMB_IS_FUNC (l->X_add_symbol))
24913 {
24914 l->X_op = O_subtract;
24915 l->X_op_symbol = r->X_add_symbol;
24916 l->X_add_number -= r->X_add_number;
24917 return TRUE;
24918 }
24919
24920 /* Process as normal. */
24921 return FALSE;
24922 }
24923
24924 /* Encode Thumb2 unconditional branches and calls. The encoding
24925 for the 2 are identical for the immediate values. */
24926
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Decompose the 25-bit branch offset into the T32 encoding fields:
     sign bit S, the I1/I2 bits, the high 10 bits and the low 11 bits
     (bit 0 of the offset is always zero and is not encoded).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  /* First halfword: sign bit and the high 10 offset bits.  */
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* Second halfword: the encoded J1/J2 bits are (I1 ^ S) and (I2 ^ S)
     inverted; the final xor with T2I1I2MASK performs the inversion.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
24948
24949 void
24950 md_apply_fix (fixS * fixP,
24951 valueT * valP,
24952 segT seg)
24953 {
24954 offsetT value = * valP;
24955 offsetT newval;
24956 unsigned int newimm;
24957 unsigned long temp;
24958 int sign;
24959 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
24960
24961 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
24962
24963 /* Note whether this will delete the relocation. */
24964
24965 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
24966 fixP->fx_done = 1;
24967
24968 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24969 consistency with the behaviour on 32-bit hosts. Remember value
24970 for emit_reloc. */
24971 value &= 0xffffffff;
24972 value ^= 0x80000000;
24973 value -= 0x80000000;
24974
24975 *valP = value;
24976 fixP->fx_addnumber = value;
24977
24978 /* Same treatment for fixP->fx_offset. */
24979 fixP->fx_offset &= 0xffffffff;
24980 fixP->fx_offset ^= 0x80000000;
24981 fixP->fx_offset -= 0x80000000;
24982
24983 switch (fixP->fx_r_type)
24984 {
24985 case BFD_RELOC_NONE:
24986 /* This will need to go in the object file. */
24987 fixP->fx_done = 0;
24988 break;
24989
24990 case BFD_RELOC_ARM_IMMEDIATE:
24991 /* We claim that this fixup has been processed here,
24992 even if in fact we generate an error because we do
24993 not have a reloc for it, so tc_gen_reloc will reject it. */
24994 fixP->fx_done = 1;
24995
24996 if (fixP->fx_addsy)
24997 {
24998 const char *msg = 0;
24999
25000 if (! S_IS_DEFINED (fixP->fx_addsy))
25001 msg = _("undefined symbol %s used as an immediate value");
25002 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
25003 msg = _("symbol %s is in a different section");
25004 else if (S_IS_WEAK (fixP->fx_addsy))
25005 msg = _("symbol %s is weak and may be overridden later");
25006
25007 if (msg)
25008 {
25009 as_bad_where (fixP->fx_file, fixP->fx_line,
25010 msg, S_GET_NAME (fixP->fx_addsy));
25011 break;
25012 }
25013 }
25014
25015 temp = md_chars_to_number (buf, INSN_SIZE);
25016
25017 /* If the offset is negative, we should use encoding A2 for ADR. */
25018 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
25019 newimm = negate_data_op (&temp, value);
25020 else
25021 {
25022 newimm = encode_arm_immediate (value);
25023
25024 /* If the instruction will fail, see if we can fix things up by
25025 changing the opcode. */
25026 if (newimm == (unsigned int) FAIL)
25027 newimm = negate_data_op (&temp, value);
25028 /* MOV accepts both ARM modified immediate (A1 encoding) and
25029 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
25030 When disassembling, MOV is preferred when there is no encoding
25031 overlap. */
25032 if (newimm == (unsigned int) FAIL
25033 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
25034 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
25035 && !((temp >> SBIT_SHIFT) & 0x1)
25036 && value >= 0 && value <= 0xffff)
25037 {
25038 /* Clear bits[23:20] to change encoding from A1 to A2. */
25039 temp &= 0xff0fffff;
25040 /* Encoding high 4bits imm. Code below will encode the remaining
25041 low 12bits. */
25042 temp |= (value & 0x0000f000) << 4;
25043 newimm = value & 0x00000fff;
25044 }
25045 }
25046
25047 if (newimm == (unsigned int) FAIL)
25048 {
25049 as_bad_where (fixP->fx_file, fixP->fx_line,
25050 _("invalid constant (%lx) after fixup"),
25051 (unsigned long) value);
25052 break;
25053 }
25054
25055 newimm |= (temp & 0xfffff000);
25056 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
25057 break;
25058
25059 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
25060 {
25061 unsigned int highpart = 0;
25062 unsigned int newinsn = 0xe1a00000; /* nop. */
25063
25064 if (fixP->fx_addsy)
25065 {
25066 const char *msg = 0;
25067
25068 if (! S_IS_DEFINED (fixP->fx_addsy))
25069 msg = _("undefined symbol %s used as an immediate value");
25070 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
25071 msg = _("symbol %s is in a different section");
25072 else if (S_IS_WEAK (fixP->fx_addsy))
25073 msg = _("symbol %s is weak and may be overridden later");
25074
25075 if (msg)
25076 {
25077 as_bad_where (fixP->fx_file, fixP->fx_line,
25078 msg, S_GET_NAME (fixP->fx_addsy));
25079 break;
25080 }
25081 }
25082
25083 newimm = encode_arm_immediate (value);
25084 temp = md_chars_to_number (buf, INSN_SIZE);
25085
25086 /* If the instruction will fail, see if we can fix things up by
25087 changing the opcode. */
25088 if (newimm == (unsigned int) FAIL
25089 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
25090 {
25091 /* No ? OK - try using two ADD instructions to generate
25092 the value. */
25093 newimm = validate_immediate_twopart (value, & highpart);
25094
25095 /* Yes - then make sure that the second instruction is
25096 also an add. */
25097 if (newimm != (unsigned int) FAIL)
25098 newinsn = temp;
25099 /* Still No ? Try using a negated value. */
25100 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
25101 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
25102 /* Otherwise - give up. */
25103 else
25104 {
25105 as_bad_where (fixP->fx_file, fixP->fx_line,
25106 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
25107 (long) value);
25108 break;
25109 }
25110
25111 /* Replace the first operand in the 2nd instruction (which
25112 is the PC) with the destination register. We have
25113 already added in the PC in the first instruction and we
25114 do not want to do it again. */
25115 newinsn &= ~ 0xf0000;
25116 newinsn |= ((newinsn & 0x0f000) << 4);
25117 }
25118
25119 newimm |= (temp & 0xfffff000);
25120 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
25121
25122 highpart |= (newinsn & 0xfffff000);
25123 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
25124 }
25125 break;
25126
25127 case BFD_RELOC_ARM_OFFSET_IMM:
25128 if (!fixP->fx_done && seg->use_rela_p)
25129 value = 0;
25130 /* Fall through. */
25131
25132 case BFD_RELOC_ARM_LITERAL:
25133 sign = value > 0;
25134
25135 if (value < 0)
25136 value = - value;
25137
25138 if (validate_offset_imm (value, 0) == FAIL)
25139 {
25140 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
25141 as_bad_where (fixP->fx_file, fixP->fx_line,
25142 _("invalid literal constant: pool needs to be closer"));
25143 else
25144 as_bad_where (fixP->fx_file, fixP->fx_line,
25145 _("bad immediate value for offset (%ld)"),
25146 (long) value);
25147 break;
25148 }
25149
25150 newval = md_chars_to_number (buf, INSN_SIZE);
25151 if (value == 0)
25152 newval &= 0xfffff000;
25153 else
25154 {
25155 newval &= 0xff7ff000;
25156 newval |= value | (sign ? INDEX_UP : 0);
25157 }
25158 md_number_to_chars (buf, newval, INSN_SIZE);
25159 break;
25160
25161 case BFD_RELOC_ARM_OFFSET_IMM8:
25162 case BFD_RELOC_ARM_HWLITERAL:
25163 sign = value > 0;
25164
25165 if (value < 0)
25166 value = - value;
25167
25168 if (validate_offset_imm (value, 1) == FAIL)
25169 {
25170 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
25171 as_bad_where (fixP->fx_file, fixP->fx_line,
25172 _("invalid literal constant: pool needs to be closer"));
25173 else
25174 as_bad_where (fixP->fx_file, fixP->fx_line,
25175 _("bad immediate value for 8-bit offset (%ld)"),
25176 (long) value);
25177 break;
25178 }
25179
25180 newval = md_chars_to_number (buf, INSN_SIZE);
25181 if (value == 0)
25182 newval &= 0xfffff0f0;
25183 else
25184 {
25185 newval &= 0xff7ff0f0;
25186 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
25187 }
25188 md_number_to_chars (buf, newval, INSN_SIZE);
25189 break;
25190
25191 case BFD_RELOC_ARM_T32_OFFSET_U8:
25192 if (value < 0 || value > 1020 || value % 4 != 0)
25193 as_bad_where (fixP->fx_file, fixP->fx_line,
25194 _("bad immediate value for offset (%ld)"), (long) value);
25195 value /= 4;
25196
25197 newval = md_chars_to_number (buf+2, THUMB_SIZE);
25198 newval |= value;
25199 md_number_to_chars (buf+2, newval, THUMB_SIZE);
25200 break;
25201
25202 case BFD_RELOC_ARM_T32_OFFSET_IMM:
25203 /* This is a complicated relocation used for all varieties of Thumb32
25204 load/store instruction with immediate offset:
25205
25206 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
25207 *4, optional writeback(W)
25208 (doubleword load/store)
25209
25210 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
25211 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
25212 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
25213 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
25214 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
25215
25216 Uppercase letters indicate bits that are already encoded at
25217 this point. Lowercase letters are our problem. For the
25218 second block of instructions, the secondary opcode nybble
25219 (bits 8..11) is present, and bit 23 is zero, even if this is
25220 a PC-relative operation. */
25221 newval = md_chars_to_number (buf, THUMB_SIZE);
25222 newval <<= 16;
25223 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
25224
25225 if ((newval & 0xf0000000) == 0xe0000000)
25226 {
25227 /* Doubleword load/store: 8-bit offset, scaled by 4. */
25228 if (value >= 0)
25229 newval |= (1 << 23);
25230 else
25231 value = -value;
25232 if (value % 4 != 0)
25233 {
25234 as_bad_where (fixP->fx_file, fixP->fx_line,
25235 _("offset not a multiple of 4"));
25236 break;
25237 }
25238 value /= 4;
25239 if (value > 0xff)
25240 {
25241 as_bad_where (fixP->fx_file, fixP->fx_line,
25242 _("offset out of range"));
25243 break;
25244 }
25245 newval &= ~0xff;
25246 }
25247 else if ((newval & 0x000f0000) == 0x000f0000)
25248 {
25249 /* PC-relative, 12-bit offset. */
25250 if (value >= 0)
25251 newval |= (1 << 23);
25252 else
25253 value = -value;
25254 if (value > 0xfff)
25255 {
25256 as_bad_where (fixP->fx_file, fixP->fx_line,
25257 _("offset out of range"));
25258 break;
25259 }
25260 newval &= ~0xfff;
25261 }
25262 else if ((newval & 0x00000100) == 0x00000100)
25263 {
25264 /* Writeback: 8-bit, +/- offset. */
25265 if (value >= 0)
25266 newval |= (1 << 9);
25267 else
25268 value = -value;
25269 if (value > 0xff)
25270 {
25271 as_bad_where (fixP->fx_file, fixP->fx_line,
25272 _("offset out of range"));
25273 break;
25274 }
25275 newval &= ~0xff;
25276 }
25277 else if ((newval & 0x00000f00) == 0x00000e00)
25278 {
25279 /* T-instruction: positive 8-bit offset. */
25280 if (value < 0 || value > 0xff)
25281 {
25282 as_bad_where (fixP->fx_file, fixP->fx_line,
25283 _("offset out of range"));
25284 break;
25285 }
25286 newval &= ~0xff;
25287 newval |= value;
25288 }
25289 else
25290 {
25291 /* Positive 12-bit or negative 8-bit offset. */
25292 int limit;
25293 if (value >= 0)
25294 {
25295 newval |= (1 << 23);
25296 limit = 0xfff;
25297 }
25298 else
25299 {
25300 value = -value;
25301 limit = 0xff;
25302 }
25303 if (value > limit)
25304 {
25305 as_bad_where (fixP->fx_file, fixP->fx_line,
25306 _("offset out of range"));
25307 break;
25308 }
25309 newval &= ~limit;
25310 }
25311
25312 newval |= value;
25313 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
25314 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
25315 break;
25316
25317 case BFD_RELOC_ARM_SHIFT_IMM:
25318 newval = md_chars_to_number (buf, INSN_SIZE);
25319 if (((unsigned long) value) > 32
25320 || (value == 32
25321 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
25322 {
25323 as_bad_where (fixP->fx_file, fixP->fx_line,
25324 _("shift expression is too large"));
25325 break;
25326 }
25327
25328 if (value == 0)
25329 /* Shifts of zero must be done as lsl. */
25330 newval &= ~0x60;
25331 else if (value == 32)
25332 value = 0;
25333 newval &= 0xfffff07f;
25334 newval |= (value & 0x1f) << 7;
25335 md_number_to_chars (buf, newval, INSN_SIZE);
25336 break;
25337
25338 case BFD_RELOC_ARM_T32_IMMEDIATE:
25339 case BFD_RELOC_ARM_T32_ADD_IMM:
25340 case BFD_RELOC_ARM_T32_IMM12:
25341 case BFD_RELOC_ARM_T32_ADD_PC12:
25342 /* We claim that this fixup has been processed here,
25343 even if in fact we generate an error because we do
25344 not have a reloc for it, so tc_gen_reloc will reject it. */
25345 fixP->fx_done = 1;
25346
25347 if (fixP->fx_addsy
25348 && ! S_IS_DEFINED (fixP->fx_addsy))
25349 {
25350 as_bad_where (fixP->fx_file, fixP->fx_line,
25351 _("undefined symbol %s used as an immediate value"),
25352 S_GET_NAME (fixP->fx_addsy));
25353 break;
25354 }
25355
25356 newval = md_chars_to_number (buf, THUMB_SIZE);
25357 newval <<= 16;
25358 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
25359
25360 newimm = FAIL;
25361 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25362 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25363 Thumb2 modified immediate encoding (T2). */
25364 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
25365 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25366 {
25367 newimm = encode_thumb32_immediate (value);
25368 if (newimm == (unsigned int) FAIL)
25369 newimm = thumb32_negate_data_op (&newval, value);
25370 }
25371 if (newimm == (unsigned int) FAIL)
25372 {
25373 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
25374 {
25375 /* Turn add/sum into addw/subw. */
25376 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
25377 newval = (newval & 0xfeffffff) | 0x02000000;
25378 /* No flat 12-bit imm encoding for addsw/subsw. */
25379 if ((newval & 0x00100000) == 0)
25380 {
25381 /* 12 bit immediate for addw/subw. */
25382 if (value < 0)
25383 {
25384 value = -value;
25385 newval ^= 0x00a00000;
25386 }
25387 if (value > 0xfff)
25388 newimm = (unsigned int) FAIL;
25389 else
25390 newimm = value;
25391 }
25392 }
25393 else
25394 {
25395 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25396 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25397 disassembling, MOV is preferred when there is no encoding
25398 overlap. */
25399 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
25400 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25401 but with the Rn field [19:16] set to 1111. */
25402 && (((newval >> 16) & 0xf) == 0xf)
25403 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
25404 && !((newval >> T2_SBIT_SHIFT) & 0x1)
25405 && value >= 0 && value <= 0xffff)
25406 {
25407 /* Toggle bit[25] to change encoding from T2 to T3. */
25408 newval ^= 1 << 25;
25409 /* Clear bits[19:16]. */
25410 newval &= 0xfff0ffff;
25411 /* Encoding high 4bits imm. Code below will encode the
25412 remaining low 12bits. */
25413 newval |= (value & 0x0000f000) << 4;
25414 newimm = value & 0x00000fff;
25415 }
25416 }
25417 }
25418
25419 if (newimm == (unsigned int)FAIL)
25420 {
25421 as_bad_where (fixP->fx_file, fixP->fx_line,
25422 _("invalid constant (%lx) after fixup"),
25423 (unsigned long) value);
25424 break;
25425 }
25426
25427 newval |= (newimm & 0x800) << 15;
25428 newval |= (newimm & 0x700) << 4;
25429 newval |= (newimm & 0x0ff);
25430
25431 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
25432 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
25433 break;
25434
25435 case BFD_RELOC_ARM_SMC:
25436 if (((unsigned long) value) > 0xffff)
25437 as_bad_where (fixP->fx_file, fixP->fx_line,
25438 _("invalid smc expression"));
25439 newval = md_chars_to_number (buf, INSN_SIZE);
25440 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25441 md_number_to_chars (buf, newval, INSN_SIZE);
25442 break;
25443
25444 case BFD_RELOC_ARM_HVC:
25445 if (((unsigned long) value) > 0xffff)
25446 as_bad_where (fixP->fx_file, fixP->fx_line,
25447 _("invalid hvc expression"));
25448 newval = md_chars_to_number (buf, INSN_SIZE);
25449 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
25450 md_number_to_chars (buf, newval, INSN_SIZE);
25451 break;
25452
25453 case BFD_RELOC_ARM_SWI:
25454 if (fixP->tc_fix_data != 0)
25455 {
25456 if (((unsigned long) value) > 0xff)
25457 as_bad_where (fixP->fx_file, fixP->fx_line,
25458 _("invalid swi expression"));
25459 newval = md_chars_to_number (buf, THUMB_SIZE);
25460 newval |= value;
25461 md_number_to_chars (buf, newval, THUMB_SIZE);
25462 }
25463 else
25464 {
25465 if (((unsigned long) value) > 0x00ffffff)
25466 as_bad_where (fixP->fx_file, fixP->fx_line,
25467 _("invalid swi expression"));
25468 newval = md_chars_to_number (buf, INSN_SIZE);
25469 newval |= value;
25470 md_number_to_chars (buf, newval, INSN_SIZE);
25471 }
25472 break;
25473
25474 case BFD_RELOC_ARM_MULTI:
25475 if (((unsigned long) value) > 0xffff)
25476 as_bad_where (fixP->fx_file, fixP->fx_line,
25477 _("invalid expression in load/store multiple"));
25478 newval = value | md_chars_to_number (buf, INSN_SIZE);
25479 md_number_to_chars (buf, newval, INSN_SIZE);
25480 break;
25481
25482 #ifdef OBJ_ELF
25483 case BFD_RELOC_ARM_PCREL_CALL:
25484
25485 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25486 && fixP->fx_addsy
25487 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25488 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25489 && THUMB_IS_FUNC (fixP->fx_addsy))
25490 /* Flip the bl to blx. This is a simple flip
25491 bit here because we generate PCREL_CALL for
25492 unconditional bls. */
25493 {
25494 newval = md_chars_to_number (buf, INSN_SIZE);
25495 newval = newval | 0x10000000;
25496 md_number_to_chars (buf, newval, INSN_SIZE);
25497 temp = 1;
25498 fixP->fx_done = 1;
25499 }
25500 else
25501 temp = 3;
25502 goto arm_branch_common;
25503
25504 case BFD_RELOC_ARM_PCREL_JUMP:
25505 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25506 && fixP->fx_addsy
25507 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25508 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25509 && THUMB_IS_FUNC (fixP->fx_addsy))
25510 {
25511 /* This would map to a bl<cond>, b<cond>,
25512 b<always> to a Thumb function. We
25513 need to force a relocation for this particular
25514 case. */
25515 newval = md_chars_to_number (buf, INSN_SIZE);
25516 fixP->fx_done = 0;
25517 }
25518 /* Fall through. */
25519
25520 case BFD_RELOC_ARM_PLT32:
25521 #endif
25522 case BFD_RELOC_ARM_PCREL_BRANCH:
25523 temp = 3;
25524 goto arm_branch_common;
25525
25526 case BFD_RELOC_ARM_PCREL_BLX:
25527
25528 temp = 1;
25529 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25530 && fixP->fx_addsy
25531 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25532 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25533 && ARM_IS_FUNC (fixP->fx_addsy))
25534 {
25535 /* Flip the blx to a bl and warn. */
25536 const char *name = S_GET_NAME (fixP->fx_addsy);
25537 newval = 0xeb000000;
25538 as_warn_where (fixP->fx_file, fixP->fx_line,
25539 _("blx to '%s' an ARM ISA state function changed to bl"),
25540 name);
25541 md_number_to_chars (buf, newval, INSN_SIZE);
25542 temp = 3;
25543 fixP->fx_done = 1;
25544 }
25545
25546 #ifdef OBJ_ELF
25547 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
25548 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
25549 #endif
25550
25551 arm_branch_common:
25552 /* We are going to store value (shifted right by two) in the
25553 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25554 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25555 also be clear. */
25556 if (value & temp)
25557 as_bad_where (fixP->fx_file, fixP->fx_line,
25558 _("misaligned branch destination"));
25559 if ((value & (offsetT)0xfe000000) != (offsetT)0
25560 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
25561 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25562
25563 if (fixP->fx_done || !seg->use_rela_p)
25564 {
25565 newval = md_chars_to_number (buf, INSN_SIZE);
25566 newval |= (value >> 2) & 0x00ffffff;
25567 /* Set the H bit on BLX instructions. */
25568 if (temp == 1)
25569 {
25570 if (value & 2)
25571 newval |= 0x01000000;
25572 else
25573 newval &= ~0x01000000;
25574 }
25575 md_number_to_chars (buf, newval, INSN_SIZE);
25576 }
25577 break;
25578
25579 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
25580 /* CBZ can only branch forward. */
25581
25582 /* Attempts to use CBZ to branch to the next instruction
25583 (which, strictly speaking, are prohibited) will be turned into
25584 no-ops.
25585
25586 FIXME: It may be better to remove the instruction completely and
25587 perform relaxation. */
25588 if (value == -2)
25589 {
25590 newval = md_chars_to_number (buf, THUMB_SIZE);
25591 newval = 0xbf00; /* NOP encoding T1 */
25592 md_number_to_chars (buf, newval, THUMB_SIZE);
25593 }
25594 else
25595 {
25596 if (value & ~0x7e)
25597 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25598
25599 if (fixP->fx_done || !seg->use_rela_p)
25600 {
25601 newval = md_chars_to_number (buf, THUMB_SIZE);
25602 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
25603 md_number_to_chars (buf, newval, THUMB_SIZE);
25604 }
25605 }
25606 break;
25607
25608 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
25609 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
25610 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25611
25612 if (fixP->fx_done || !seg->use_rela_p)
25613 {
25614 newval = md_chars_to_number (buf, THUMB_SIZE);
25615 newval |= (value & 0x1ff) >> 1;
25616 md_number_to_chars (buf, newval, THUMB_SIZE);
25617 }
25618 break;
25619
25620 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
25621 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
25622 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25623
25624 if (fixP->fx_done || !seg->use_rela_p)
25625 {
25626 newval = md_chars_to_number (buf, THUMB_SIZE);
25627 newval |= (value & 0xfff) >> 1;
25628 md_number_to_chars (buf, newval, THUMB_SIZE);
25629 }
25630 break;
25631
25632 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25633 if (fixP->fx_addsy
25634 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25635 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25636 && ARM_IS_FUNC (fixP->fx_addsy)
25637 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25638 {
25639 /* Force a relocation for a branch 20 bits wide. */
25640 fixP->fx_done = 0;
25641 }
25642 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
25643 as_bad_where (fixP->fx_file, fixP->fx_line,
25644 _("conditional branch out of range"));
25645
25646 if (fixP->fx_done || !seg->use_rela_p)
25647 {
25648 offsetT newval2;
25649 addressT S, J1, J2, lo, hi;
25650
25651 S = (value & 0x00100000) >> 20;
25652 J2 = (value & 0x00080000) >> 19;
25653 J1 = (value & 0x00040000) >> 18;
25654 hi = (value & 0x0003f000) >> 12;
25655 lo = (value & 0x00000ffe) >> 1;
25656
25657 newval = md_chars_to_number (buf, THUMB_SIZE);
25658 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25659 newval |= (S << 10) | hi;
25660 newval2 |= (J1 << 13) | (J2 << 11) | lo;
25661 md_number_to_chars (buf, newval, THUMB_SIZE);
25662 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25663 }
25664 break;
25665
25666 case BFD_RELOC_THUMB_PCREL_BLX:
25667 /* If there is a blx from a thumb state function to
25668 another thumb function flip this to a bl and warn
25669 about it. */
25670
25671 if (fixP->fx_addsy
25672 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25673 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25674 && THUMB_IS_FUNC (fixP->fx_addsy))
25675 {
25676 const char *name = S_GET_NAME (fixP->fx_addsy);
25677 as_warn_where (fixP->fx_file, fixP->fx_line,
25678 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25679 name);
25680 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25681 newval = newval | 0x1000;
25682 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25683 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25684 fixP->fx_done = 1;
25685 }
25686
25687
25688 goto thumb_bl_common;
25689
25690 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25691 /* A bl from Thumb state ISA to an internal ARM state function
25692 is converted to a blx. */
25693 if (fixP->fx_addsy
25694 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25695 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25696 && ARM_IS_FUNC (fixP->fx_addsy)
25697 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
25698 {
25699 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25700 newval = newval & ~0x1000;
25701 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
25702 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
25703 fixP->fx_done = 1;
25704 }
25705
25706 thumb_bl_common:
25707
25708 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25709 /* For a BLX instruction, make sure that the relocation is rounded up
25710 to a word boundary. This follows the semantics of the instruction
25711 which specifies that bit 1 of the target address will come from bit
25712 1 of the base address. */
25713 value = (value + 3) & ~ 3;
25714
25715 #ifdef OBJ_ELF
25716 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
25717 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
25718 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
25719 #endif
25720
25721 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
25722 {
25723 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
25724 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25725 else if ((value & ~0x1ffffff)
25726 && ((value & ~0x1ffffff) != ~0x1ffffff))
25727 as_bad_where (fixP->fx_file, fixP->fx_line,
25728 _("Thumb2 branch out of range"));
25729 }
25730
25731 if (fixP->fx_done || !seg->use_rela_p)
25732 encode_thumb2_b_bl_offset (buf, value);
25733
25734 break;
25735
25736 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25737 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
25738 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
25739
25740 if (fixP->fx_done || !seg->use_rela_p)
25741 encode_thumb2_b_bl_offset (buf, value);
25742
25743 break;
25744
25745 case BFD_RELOC_8:
25746 if (fixP->fx_done || !seg->use_rela_p)
25747 *buf = value;
25748 break;
25749
25750 case BFD_RELOC_16:
25751 if (fixP->fx_done || !seg->use_rela_p)
25752 md_number_to_chars (buf, value, 2);
25753 break;
25754
25755 #ifdef OBJ_ELF
25756 case BFD_RELOC_ARM_TLS_CALL:
25757 case BFD_RELOC_ARM_THM_TLS_CALL:
25758 case BFD_RELOC_ARM_TLS_DESCSEQ:
25759 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
25760 case BFD_RELOC_ARM_TLS_GOTDESC:
25761 case BFD_RELOC_ARM_TLS_GD32:
25762 case BFD_RELOC_ARM_TLS_LE32:
25763 case BFD_RELOC_ARM_TLS_IE32:
25764 case BFD_RELOC_ARM_TLS_LDM32:
25765 case BFD_RELOC_ARM_TLS_LDO32:
25766 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25767 break;
25768
25769 /* Same handling as above, but with the arm_fdpic guard. */
25770 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
25771 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
25772 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
25773 if (arm_fdpic)
25774 {
25775 S_SET_THREAD_LOCAL (fixP->fx_addsy);
25776 }
25777 else
25778 {
25779 as_bad_where (fixP->fx_file, fixP->fx_line,
25780 _("Relocation supported only in FDPIC mode"));
25781 }
25782 break;
25783
25784 case BFD_RELOC_ARM_GOT32:
25785 case BFD_RELOC_ARM_GOTOFF:
25786 break;
25787
25788 case BFD_RELOC_ARM_GOT_PREL:
25789 if (fixP->fx_done || !seg->use_rela_p)
25790 md_number_to_chars (buf, value, 4);
25791 break;
25792
25793 case BFD_RELOC_ARM_TARGET2:
25794 /* TARGET2 is not partial-inplace, so we need to write the
25795 addend here for REL targets, because it won't be written out
25796 during reloc processing later. */
25797 if (fixP->fx_done || !seg->use_rela_p)
25798 md_number_to_chars (buf, fixP->fx_offset, 4);
25799 break;
25800
25801 /* Relocations for FDPIC. */
25802 case BFD_RELOC_ARM_GOTFUNCDESC:
25803 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
25804 case BFD_RELOC_ARM_FUNCDESC:
25805 if (arm_fdpic)
25806 {
25807 if (fixP->fx_done || !seg->use_rela_p)
25808 md_number_to_chars (buf, 0, 4);
25809 }
25810 else
25811 {
25812 as_bad_where (fixP->fx_file, fixP->fx_line,
25813 _("Relocation supported only in FDPIC mode"));
25814 }
25815 break;
25816 #endif
25817
25818 case BFD_RELOC_RVA:
25819 case BFD_RELOC_32:
25820 case BFD_RELOC_ARM_TARGET1:
25821 case BFD_RELOC_ARM_ROSEGREL32:
25822 case BFD_RELOC_ARM_SBREL32:
25823 case BFD_RELOC_32_PCREL:
25824 #ifdef TE_PE
25825 case BFD_RELOC_32_SECREL:
25826 #endif
25827 if (fixP->fx_done || !seg->use_rela_p)
25828 #ifdef TE_WINCE
25829 /* For WinCE we only do this for pcrel fixups. */
25830 if (fixP->fx_done || fixP->fx_pcrel)
25831 #endif
25832 md_number_to_chars (buf, value, 4);
25833 break;
25834
25835 #ifdef OBJ_ELF
25836 case BFD_RELOC_ARM_PREL31:
25837 if (fixP->fx_done || !seg->use_rela_p)
25838 {
25839 newval = md_chars_to_number (buf, 4) & 0x80000000;
25840 if ((value ^ (value >> 1)) & 0x40000000)
25841 {
25842 as_bad_where (fixP->fx_file, fixP->fx_line,
25843 _("rel31 relocation overflow"));
25844 }
25845 newval |= value & 0x7fffffff;
25846 md_number_to_chars (buf, newval, 4);
25847 }
25848 break;
25849 #endif
25850
25851 case BFD_RELOC_ARM_CP_OFF_IMM:
25852 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
25853 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
25854 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
25855 newval = md_chars_to_number (buf, INSN_SIZE);
25856 else
25857 newval = get_thumb32_insn (buf);
25858 if ((newval & 0x0f200f00) == 0x0d000900)
25859 {
25860 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25861 has permitted values that are multiples of 2, in the range 0
25862 to 510. */
25863 if (value < -510 || value > 510 || (value & 1))
25864 as_bad_where (fixP->fx_file, fixP->fx_line,
25865 _("co-processor offset out of range"));
25866 }
25867 else if ((newval & 0xfe001f80) == 0xec000f80)
25868 {
25869 if (value < -511 || value > 512 || (value & 3))
25870 as_bad_where (fixP->fx_file, fixP->fx_line,
25871 _("co-processor offset out of range"));
25872 }
25873 else if (value < -1023 || value > 1023 || (value & 3))
25874 as_bad_where (fixP->fx_file, fixP->fx_line,
25875 _("co-processor offset out of range"));
25876 cp_off_common:
25877 sign = value > 0;
25878 if (value < 0)
25879 value = -value;
25880 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25881 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25882 newval = md_chars_to_number (buf, INSN_SIZE);
25883 else
25884 newval = get_thumb32_insn (buf);
25885 if (value == 0)
25886 {
25887 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25888 newval &= 0xffffff80;
25889 else
25890 newval &= 0xffffff00;
25891 }
25892 else
25893 {
25894 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
25895 newval &= 0xff7fff80;
25896 else
25897 newval &= 0xff7fff00;
25898 if ((newval & 0x0f200f00) == 0x0d000900)
25899 {
25900 /* This is a fp16 vstr/vldr.
25901
25902 It requires the immediate offset in the instruction is shifted
25903 left by 1 to be a half-word offset.
25904
25905 Here, left shift by 1 first, and later right shift by 2
25906 should get the right offset. */
25907 value <<= 1;
25908 }
25909 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
25910 }
25911 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25912 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
25913 md_number_to_chars (buf, newval, INSN_SIZE);
25914 else
25915 put_thumb32_insn (buf, newval);
25916 break;
25917
25918 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
25919 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
25920 if (value < -255 || value > 255)
25921 as_bad_where (fixP->fx_file, fixP->fx_line,
25922 _("co-processor offset out of range"));
25923 value *= 4;
25924 goto cp_off_common;
25925
25926 case BFD_RELOC_ARM_THUMB_OFFSET:
25927 newval = md_chars_to_number (buf, THUMB_SIZE);
25928 /* Exactly what ranges, and where the offset is inserted depends
25929 on the type of instruction, we can establish this from the
25930 top 4 bits. */
25931 switch (newval >> 12)
25932 {
25933 case 4: /* PC load. */
25934 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25935 forced to zero for these loads; md_pcrel_from has already
25936 compensated for this. */
25937 if (value & 3)
25938 as_bad_where (fixP->fx_file, fixP->fx_line,
25939 _("invalid offset, target not word aligned (0x%08lX)"),
25940 (((unsigned long) fixP->fx_frag->fr_address
25941 + (unsigned long) fixP->fx_where) & ~3)
25942 + (unsigned long) value);
25943
25944 if (value & ~0x3fc)
25945 as_bad_where (fixP->fx_file, fixP->fx_line,
25946 _("invalid offset, value too big (0x%08lX)"),
25947 (long) value);
25948
25949 newval |= value >> 2;
25950 break;
25951
25952 case 9: /* SP load/store. */
25953 if (value & ~0x3fc)
25954 as_bad_where (fixP->fx_file, fixP->fx_line,
25955 _("invalid offset, value too big (0x%08lX)"),
25956 (long) value);
25957 newval |= value >> 2;
25958 break;
25959
25960 case 6: /* Word load/store. */
25961 if (value & ~0x7c)
25962 as_bad_where (fixP->fx_file, fixP->fx_line,
25963 _("invalid offset, value too big (0x%08lX)"),
25964 (long) value);
25965 newval |= value << 4; /* 6 - 2. */
25966 break;
25967
25968 case 7: /* Byte load/store. */
25969 if (value & ~0x1f)
25970 as_bad_where (fixP->fx_file, fixP->fx_line,
25971 _("invalid offset, value too big (0x%08lX)"),
25972 (long) value);
25973 newval |= value << 6;
25974 break;
25975
25976 case 8: /* Halfword load/store. */
25977 if (value & ~0x3e)
25978 as_bad_where (fixP->fx_file, fixP->fx_line,
25979 _("invalid offset, value too big (0x%08lX)"),
25980 (long) value);
25981 newval |= value << 5; /* 6 - 1. */
25982 break;
25983
25984 default:
25985 as_bad_where (fixP->fx_file, fixP->fx_line,
25986 "Unable to process relocation for thumb opcode: %lx",
25987 (unsigned long) newval);
25988 break;
25989 }
25990 md_number_to_chars (buf, newval, THUMB_SIZE);
25991 break;
25992
25993 case BFD_RELOC_ARM_THUMB_ADD:
25994 /* This is a complicated relocation, since we use it for all of
25995 the following immediate relocations:
25996
25997 3bit ADD/SUB
25998 8bit ADD/SUB
25999 9bit ADD/SUB SP word-aligned
26000 10bit ADD PC/SP word-aligned
26001
26002 The type of instruction being processed is encoded in the
26003 instruction field:
26004
26005 0x8000 SUB
26006 0x00F0 Rd
26007 0x000F Rs
26008 */
26009 newval = md_chars_to_number (buf, THUMB_SIZE);
26010 {
26011 int rd = (newval >> 4) & 0xf;
26012 int rs = newval & 0xf;
26013 int subtract = !!(newval & 0x8000);
26014
26015 /* Check for HI regs, only very restricted cases allowed:
26016 Adjusting SP, and using PC or SP to get an address. */
26017 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
26018 || (rs > 7 && rs != REG_SP && rs != REG_PC))
26019 as_bad_where (fixP->fx_file, fixP->fx_line,
26020 _("invalid Hi register with immediate"));
26021
26022 /* If value is negative, choose the opposite instruction. */
26023 if (value < 0)
26024 {
26025 value = -value;
26026 subtract = !subtract;
26027 if (value < 0)
26028 as_bad_where (fixP->fx_file, fixP->fx_line,
26029 _("immediate value out of range"));
26030 }
26031
26032 if (rd == REG_SP)
26033 {
26034 if (value & ~0x1fc)
26035 as_bad_where (fixP->fx_file, fixP->fx_line,
26036 _("invalid immediate for stack address calculation"));
26037 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
26038 newval |= value >> 2;
26039 }
26040 else if (rs == REG_PC || rs == REG_SP)
26041 {
26042 /* PR gas/18541. If the addition is for a defined symbol
26043 within range of an ADR instruction then accept it. */
26044 if (subtract
26045 && value == 4
26046 && fixP->fx_addsy != NULL)
26047 {
26048 subtract = 0;
26049
26050 if (! S_IS_DEFINED (fixP->fx_addsy)
26051 || S_GET_SEGMENT (fixP->fx_addsy) != seg
26052 || S_IS_WEAK (fixP->fx_addsy))
26053 {
26054 as_bad_where (fixP->fx_file, fixP->fx_line,
26055 _("address calculation needs a strongly defined nearby symbol"));
26056 }
26057 else
26058 {
26059 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
26060
26061 /* Round up to the next 4-byte boundary. */
26062 if (v & 3)
26063 v = (v + 3) & ~ 3;
26064 else
26065 v += 4;
26066 v = S_GET_VALUE (fixP->fx_addsy) - v;
26067
26068 if (v & ~0x3fc)
26069 {
26070 as_bad_where (fixP->fx_file, fixP->fx_line,
26071 _("symbol too far away"));
26072 }
26073 else
26074 {
26075 fixP->fx_done = 1;
26076 value = v;
26077 }
26078 }
26079 }
26080
26081 if (subtract || value & ~0x3fc)
26082 as_bad_where (fixP->fx_file, fixP->fx_line,
26083 _("invalid immediate for address calculation (value = 0x%08lX)"),
26084 (unsigned long) (subtract ? - value : value));
26085 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
26086 newval |= rd << 8;
26087 newval |= value >> 2;
26088 }
26089 else if (rs == rd)
26090 {
26091 if (value & ~0xff)
26092 as_bad_where (fixP->fx_file, fixP->fx_line,
26093 _("immediate value out of range"));
26094 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
26095 newval |= (rd << 8) | value;
26096 }
26097 else
26098 {
26099 if (value & ~0x7)
26100 as_bad_where (fixP->fx_file, fixP->fx_line,
26101 _("immediate value out of range"));
26102 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
26103 newval |= rd | (rs << 3) | (value << 6);
26104 }
26105 }
26106 md_number_to_chars (buf, newval, THUMB_SIZE);
26107 break;
26108
26109 case BFD_RELOC_ARM_THUMB_IMM:
26110 newval = md_chars_to_number (buf, THUMB_SIZE);
26111 if (value < 0 || value > 255)
26112 as_bad_where (fixP->fx_file, fixP->fx_line,
26113 _("invalid immediate: %ld is out of range"),
26114 (long) value);
26115 newval |= value;
26116 md_number_to_chars (buf, newval, THUMB_SIZE);
26117 break;
26118
26119 case BFD_RELOC_ARM_THUMB_SHIFT:
26120 /* 5bit shift value (0..32). LSL cannot take 32. */
26121 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
26122 temp = newval & 0xf800;
26123 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
26124 as_bad_where (fixP->fx_file, fixP->fx_line,
26125 _("invalid shift value: %ld"), (long) value);
26126 /* Shifts of zero must be encoded as LSL. */
26127 if (value == 0)
26128 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
26129 /* Shifts of 32 are encoded as zero. */
26130 else if (value == 32)
26131 value = 0;
26132 newval |= value << 6;
26133 md_number_to_chars (buf, newval, THUMB_SIZE);
26134 break;
26135
26136 case BFD_RELOC_VTABLE_INHERIT:
26137 case BFD_RELOC_VTABLE_ENTRY:
26138 fixP->fx_done = 0;
26139 return;
26140
26141 case BFD_RELOC_ARM_MOVW:
26142 case BFD_RELOC_ARM_MOVT:
26143 case BFD_RELOC_ARM_THUMB_MOVW:
26144 case BFD_RELOC_ARM_THUMB_MOVT:
26145 if (fixP->fx_done || !seg->use_rela_p)
26146 {
26147 /* REL format relocations are limited to a 16-bit addend. */
26148 if (!fixP->fx_done)
26149 {
26150 if (value < -0x8000 || value > 0x7fff)
26151 as_bad_where (fixP->fx_file, fixP->fx_line,
26152 _("offset out of range"));
26153 }
26154 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
26155 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
26156 {
26157 value >>= 16;
26158 }
26159
26160 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
26161 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
26162 {
26163 newval = get_thumb32_insn (buf);
26164 newval &= 0xfbf08f00;
26165 newval |= (value & 0xf000) << 4;
26166 newval |= (value & 0x0800) << 15;
26167 newval |= (value & 0x0700) << 4;
26168 newval |= (value & 0x00ff);
26169 put_thumb32_insn (buf, newval);
26170 }
26171 else
26172 {
26173 newval = md_chars_to_number (buf, 4);
26174 newval &= 0xfff0f000;
26175 newval |= value & 0x0fff;
26176 newval |= (value & 0xf000) << 4;
26177 md_number_to_chars (buf, newval, 4);
26178 }
26179 }
26180 return;
26181
26182 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
26183 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
26184 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
26185 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
26186 gas_assert (!fixP->fx_done);
26187 {
26188 bfd_vma insn;
26189 bfd_boolean is_mov;
26190 bfd_vma encoded_addend = value;
26191
26192 /* Check that addend can be encoded in instruction. */
26193 if (!seg->use_rela_p && (value < 0 || value > 255))
26194 as_bad_where (fixP->fx_file, fixP->fx_line,
26195 _("the offset 0x%08lX is not representable"),
26196 (unsigned long) encoded_addend);
26197
26198 /* Extract the instruction. */
26199 insn = md_chars_to_number (buf, THUMB_SIZE);
26200 is_mov = (insn & 0xf800) == 0x2000;
26201
26202 /* Encode insn. */
26203 if (is_mov)
26204 {
26205 if (!seg->use_rela_p)
26206 insn |= encoded_addend;
26207 }
26208 else
26209 {
26210 int rd, rs;
26211
26212 /* Extract the instruction. */
26213 /* Encoding is the following
26214 0x8000 SUB
26215 0x00F0 Rd
26216 0x000F Rs
26217 */
26218 /* The following conditions must be true :
26219 - ADD
26220 - Rd == Rs
26221 - Rd <= 7
26222 */
26223 rd = (insn >> 4) & 0xf;
26224 rs = insn & 0xf;
26225 if ((insn & 0x8000) || (rd != rs) || rd > 7)
26226 as_bad_where (fixP->fx_file, fixP->fx_line,
26227 _("Unable to process relocation for thumb opcode: %lx"),
26228 (unsigned long) insn);
26229
26230 /* Encode as ADD immediate8 thumb 1 code. */
26231 insn = 0x3000 | (rd << 8);
26232
26233 /* Place the encoded addend into the first 8 bits of the
26234 instruction. */
26235 if (!seg->use_rela_p)
26236 insn |= encoded_addend;
26237 }
26238
26239 /* Update the instruction. */
26240 md_number_to_chars (buf, insn, THUMB_SIZE);
26241 }
26242 break;
26243
26244 case BFD_RELOC_ARM_ALU_PC_G0_NC:
26245 case BFD_RELOC_ARM_ALU_PC_G0:
26246 case BFD_RELOC_ARM_ALU_PC_G1_NC:
26247 case BFD_RELOC_ARM_ALU_PC_G1:
26248 case BFD_RELOC_ARM_ALU_PC_G2:
26249 case BFD_RELOC_ARM_ALU_SB_G0_NC:
26250 case BFD_RELOC_ARM_ALU_SB_G0:
26251 case BFD_RELOC_ARM_ALU_SB_G1_NC:
26252 case BFD_RELOC_ARM_ALU_SB_G1:
26253 case BFD_RELOC_ARM_ALU_SB_G2:
26254 gas_assert (!fixP->fx_done);
26255 if (!seg->use_rela_p)
26256 {
26257 bfd_vma insn;
26258 bfd_vma encoded_addend;
26259 bfd_vma addend_abs = llabs (value);
26260
26261 /* Check that the absolute value of the addend can be
26262 expressed as an 8-bit constant plus a rotation. */
26263 encoded_addend = encode_arm_immediate (addend_abs);
26264 if (encoded_addend == (unsigned int) FAIL)
26265 as_bad_where (fixP->fx_file, fixP->fx_line,
26266 _("the offset 0x%08lX is not representable"),
26267 (unsigned long) addend_abs);
26268
26269 /* Extract the instruction. */
26270 insn = md_chars_to_number (buf, INSN_SIZE);
26271
26272 /* If the addend is positive, use an ADD instruction.
26273 Otherwise use a SUB. Take care not to destroy the S bit. */
26274 insn &= 0xff1fffff;
26275 if (value < 0)
26276 insn |= 1 << 22;
26277 else
26278 insn |= 1 << 23;
26279
26280 /* Place the encoded addend into the first 12 bits of the
26281 instruction. */
26282 insn &= 0xfffff000;
26283 insn |= encoded_addend;
26284
26285 /* Update the instruction. */
26286 md_number_to_chars (buf, insn, INSN_SIZE);
26287 }
26288 break;
26289
26290 case BFD_RELOC_ARM_LDR_PC_G0:
26291 case BFD_RELOC_ARM_LDR_PC_G1:
26292 case BFD_RELOC_ARM_LDR_PC_G2:
26293 case BFD_RELOC_ARM_LDR_SB_G0:
26294 case BFD_RELOC_ARM_LDR_SB_G1:
26295 case BFD_RELOC_ARM_LDR_SB_G2:
26296 gas_assert (!fixP->fx_done);
26297 if (!seg->use_rela_p)
26298 {
26299 bfd_vma insn;
26300 bfd_vma addend_abs = llabs (value);
26301
26302 /* Check that the absolute value of the addend can be
26303 encoded in 12 bits. */
26304 if (addend_abs >= 0x1000)
26305 as_bad_where (fixP->fx_file, fixP->fx_line,
26306 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
26307 (unsigned long) addend_abs);
26308
26309 /* Extract the instruction. */
26310 insn = md_chars_to_number (buf, INSN_SIZE);
26311
26312 /* If the addend is negative, clear bit 23 of the instruction.
26313 Otherwise set it. */
26314 if (value < 0)
26315 insn &= ~(1 << 23);
26316 else
26317 insn |= 1 << 23;
26318
26319 /* Place the absolute value of the addend into the first 12 bits
26320 of the instruction. */
26321 insn &= 0xfffff000;
26322 insn |= addend_abs;
26323
26324 /* Update the instruction. */
26325 md_number_to_chars (buf, insn, INSN_SIZE);
26326 }
26327 break;
26328
26329 case BFD_RELOC_ARM_LDRS_PC_G0:
26330 case BFD_RELOC_ARM_LDRS_PC_G1:
26331 case BFD_RELOC_ARM_LDRS_PC_G2:
26332 case BFD_RELOC_ARM_LDRS_SB_G0:
26333 case BFD_RELOC_ARM_LDRS_SB_G1:
26334 case BFD_RELOC_ARM_LDRS_SB_G2:
26335 gas_assert (!fixP->fx_done);
26336 if (!seg->use_rela_p)
26337 {
26338 bfd_vma insn;
26339 bfd_vma addend_abs = llabs (value);
26340
26341 /* Check that the absolute value of the addend can be
26342 encoded in 8 bits. */
26343 if (addend_abs >= 0x100)
26344 as_bad_where (fixP->fx_file, fixP->fx_line,
26345 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26346 (unsigned long) addend_abs);
26347
26348 /* Extract the instruction. */
26349 insn = md_chars_to_number (buf, INSN_SIZE);
26350
26351 /* If the addend is negative, clear bit 23 of the instruction.
26352 Otherwise set it. */
26353 if (value < 0)
26354 insn &= ~(1 << 23);
26355 else
26356 insn |= 1 << 23;
26357
26358 /* Place the first four bits of the absolute value of the addend
26359 into the first 4 bits of the instruction, and the remaining
26360 four into bits 8 .. 11. */
26361 insn &= 0xfffff0f0;
26362 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
26363
26364 /* Update the instruction. */
26365 md_number_to_chars (buf, insn, INSN_SIZE);
26366 }
26367 break;
26368
26369 case BFD_RELOC_ARM_LDC_PC_G0:
26370 case BFD_RELOC_ARM_LDC_PC_G1:
26371 case BFD_RELOC_ARM_LDC_PC_G2:
26372 case BFD_RELOC_ARM_LDC_SB_G0:
26373 case BFD_RELOC_ARM_LDC_SB_G1:
26374 case BFD_RELOC_ARM_LDC_SB_G2:
26375 gas_assert (!fixP->fx_done);
26376 if (!seg->use_rela_p)
26377 {
26378 bfd_vma insn;
26379 bfd_vma addend_abs = llabs (value);
26380
26381 /* Check that the absolute value of the addend is a multiple of
26382 four and, when divided by four, fits in 8 bits. */
26383 if (addend_abs & 0x3)
26384 as_bad_where (fixP->fx_file, fixP->fx_line,
26385 _("bad offset 0x%08lX (must be word-aligned)"),
26386 (unsigned long) addend_abs);
26387
26388 if ((addend_abs >> 2) > 0xff)
26389 as_bad_where (fixP->fx_file, fixP->fx_line,
26390 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26391 (unsigned long) addend_abs);
26392
26393 /* Extract the instruction. */
26394 insn = md_chars_to_number (buf, INSN_SIZE);
26395
26396 /* If the addend is negative, clear bit 23 of the instruction.
26397 Otherwise set it. */
26398 if (value < 0)
26399 insn &= ~(1 << 23);
26400 else
26401 insn |= 1 << 23;
26402
26403 /* Place the addend (divided by four) into the first eight
26404 bits of the instruction. */
26405 insn &= 0xfffffff0;
26406 insn |= addend_abs >> 2;
26407
26408 /* Update the instruction. */
26409 md_number_to_chars (buf, insn, INSN_SIZE);
26410 }
26411 break;
26412
26413 case BFD_RELOC_THUMB_PCREL_BRANCH5:
26414 if (fixP->fx_addsy
26415 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26416 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26417 && ARM_IS_FUNC (fixP->fx_addsy)
26418 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26419 {
26420 /* Force a relocation for a branch 5 bits wide. */
26421 fixP->fx_done = 0;
26422 }
26423 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
26424 as_bad_where (fixP->fx_file, fixP->fx_line,
26425 BAD_BRANCH_OFF);
26426
26427 if (fixP->fx_done || !seg->use_rela_p)
26428 {
26429 addressT boff = value >> 1;
26430
26431 newval = md_chars_to_number (buf, THUMB_SIZE);
26432 newval |= (boff << 7);
26433 md_number_to_chars (buf, newval, THUMB_SIZE);
26434 }
26435 break;
26436
26437 case BFD_RELOC_THUMB_PCREL_BFCSEL:
26438 if (fixP->fx_addsy
26439 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26440 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26441 && ARM_IS_FUNC (fixP->fx_addsy)
26442 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26443 {
26444 fixP->fx_done = 0;
26445 }
26446 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
26447 as_bad_where (fixP->fx_file, fixP->fx_line,
26448 _("branch out of range"));
26449
26450 if (fixP->fx_done || !seg->use_rela_p)
26451 {
26452 newval = md_chars_to_number (buf, THUMB_SIZE);
26453
26454 addressT boff = ((newval & 0x0780) >> 7) << 1;
26455 addressT diff = value - boff;
26456
26457 if (diff == 4)
26458 {
26459 newval |= 1 << 1; /* T bit. */
26460 }
26461 else if (diff != 2)
26462 {
26463 as_bad_where (fixP->fx_file, fixP->fx_line,
26464 _("out of range label-relative fixup value"));
26465 }
26466 md_number_to_chars (buf, newval, THUMB_SIZE);
26467 }
26468 break;
26469
26470 case BFD_RELOC_ARM_THUMB_BF17:
26471 if (fixP->fx_addsy
26472 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26473 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26474 && ARM_IS_FUNC (fixP->fx_addsy)
26475 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26476 {
26477 /* Force a relocation for a branch 17 bits wide. */
26478 fixP->fx_done = 0;
26479 }
26480
26481 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
26482 as_bad_where (fixP->fx_file, fixP->fx_line,
26483 BAD_BRANCH_OFF);
26484
26485 if (fixP->fx_done || !seg->use_rela_p)
26486 {
26487 offsetT newval2;
26488 addressT immA, immB, immC;
26489
26490 immA = (value & 0x0001f000) >> 12;
26491 immB = (value & 0x00000ffc) >> 2;
26492 immC = (value & 0x00000002) >> 1;
26493
26494 newval = md_chars_to_number (buf, THUMB_SIZE);
26495 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26496 newval |= immA;
26497 newval2 |= (immC << 11) | (immB << 1);
26498 md_number_to_chars (buf, newval, THUMB_SIZE);
26499 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26500 }
26501 break;
26502
26503 case BFD_RELOC_ARM_THUMB_BF19:
26504 if (fixP->fx_addsy
26505 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26506 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26507 && ARM_IS_FUNC (fixP->fx_addsy)
26508 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26509 {
26510 /* Force a relocation for a branch 19 bits wide. */
26511 fixP->fx_done = 0;
26512 }
26513
26514 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
26515 as_bad_where (fixP->fx_file, fixP->fx_line,
26516 BAD_BRANCH_OFF);
26517
26518 if (fixP->fx_done || !seg->use_rela_p)
26519 {
26520 offsetT newval2;
26521 addressT immA, immB, immC;
26522
26523 immA = (value & 0x0007f000) >> 12;
26524 immB = (value & 0x00000ffc) >> 2;
26525 immC = (value & 0x00000002) >> 1;
26526
26527 newval = md_chars_to_number (buf, THUMB_SIZE);
26528 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26529 newval |= immA;
26530 newval2 |= (immC << 11) | (immB << 1);
26531 md_number_to_chars (buf, newval, THUMB_SIZE);
26532 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26533 }
26534 break;
26535
26536 case BFD_RELOC_ARM_THUMB_BF13:
26537 if (fixP->fx_addsy
26538 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26539 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26540 && ARM_IS_FUNC (fixP->fx_addsy)
26541 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26542 {
26543 /* Force a relocation for a branch 13 bits wide. */
26544 fixP->fx_done = 0;
26545 }
26546
26547 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
26548 as_bad_where (fixP->fx_file, fixP->fx_line,
26549 BAD_BRANCH_OFF);
26550
26551 if (fixP->fx_done || !seg->use_rela_p)
26552 {
26553 offsetT newval2;
26554 addressT immA, immB, immC;
26555
26556 immA = (value & 0x00001000) >> 12;
26557 immB = (value & 0x00000ffc) >> 2;
26558 immC = (value & 0x00000002) >> 1;
26559
26560 newval = md_chars_to_number (buf, THUMB_SIZE);
26561 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26562 newval |= immA;
26563 newval2 |= (immC << 11) | (immB << 1);
26564 md_number_to_chars (buf, newval, THUMB_SIZE);
26565 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
26566 }
26567 break;
26568
26569 case BFD_RELOC_ARM_THUMB_LOOP12:
26570 if (fixP->fx_addsy
26571 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
26572 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
26573 && ARM_IS_FUNC (fixP->fx_addsy)
26574 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
26575 {
26576 /* Force a relocation for a branch 12 bits wide. */
26577 fixP->fx_done = 0;
26578 }
26579
26580 bfd_vma insn = get_thumb32_insn (buf);
26581 /* le lr, <label> or le <label> */
26582 if (((insn & 0xffffffff) == 0xf00fc001)
26583 || ((insn & 0xffffffff) == 0xf02fc001))
26584 value = -value;
26585
26586 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
26587 as_bad_where (fixP->fx_file, fixP->fx_line,
26588 BAD_BRANCH_OFF);
26589 if (fixP->fx_done || !seg->use_rela_p)
26590 {
26591 addressT imml, immh;
26592
26593 immh = (value & 0x00000ffc) >> 2;
26594 imml = (value & 0x00000002) >> 1;
26595
26596 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
26597 newval |= (imml << 11) | (immh << 1);
26598 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
26599 }
26600 break;
26601
26602 case BFD_RELOC_ARM_V4BX:
26603 /* This will need to go in the object file. */
26604 fixP->fx_done = 0;
26605 break;
26606
26607 case BFD_RELOC_UNUSED:
26608 default:
26609 as_bad_where (fixP->fx_file, fixP->fx_line,
26610 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
26611 }
26612 }
26613
/* Translate internal representation of relocation info to BFD target
   format.  Builds a freshly allocated arelent for FIXP in SECTION, or
   returns NULL after reporting an error for fixups that should have been
   resolved internally or cannot be represented in the output format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* On RELA targets fold the PC bias into the explicit addend; on REL
	 targets the addend lives in the section contents, so record the
	 reloc's own address as the offset instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type to a BFD reloc code.  The first group of
     cases converts absolute relocs to their PC-relative counterparts when
     the fixup is PC-relative, falling through to the identity mapping
     otherwise.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocs pass through unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later deprecates R_ARM_THM_XPC22 (BLX) in favour of
	 R_ARM_THM_CALL (BRANCH23); the linker handles the mode switch.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These v8.1-M branch fixups have no object-file representation;
	 they must resolve within the assembled file.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only fixup types: name them in the diagnostic.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes R_ARM_GOTPC,
     with the reloc's own address as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
26900
26901 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26902
26903 void
26904 cons_fix_new_arm (fragS * frag,
26905 int where,
26906 int size,
26907 expressionS * exp,
26908 bfd_reloc_code_real_type reloc)
26909 {
26910 int pcrel = 0;
26911
26912 /* Pick a reloc.
26913 FIXME: @@ Should look at CPU word size. */
26914 switch (size)
26915 {
26916 case 1:
26917 reloc = BFD_RELOC_8;
26918 break;
26919 case 2:
26920 reloc = BFD_RELOC_16;
26921 break;
26922 case 4:
26923 default:
26924 reloc = BFD_RELOC_32;
26925 break;
26926 case 8:
26927 reloc = BFD_RELOC_64;
26928 break;
26929 }
26930
26931 #ifdef TE_PE
26932 if (exp->X_op == O_secrel)
26933 {
26934 exp->X_op = O_symbol;
26935 reloc = BFD_RELOC_32_SECREL;
26936 }
26937 #endif
26938
26939 fix_new_exp (frag, where, size, exp, pcrel, reloc);
26940 }
26941
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
26959
26960
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file rather than resolved by the assembler.  Returns non-zero
   to force emission of a reloc.  */

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* ARM-mode branch targeting a Thumb function: interworking needed.  */
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Thumb-mode branch targeting an ARM function: interworking needed.  */
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.  */
  /* NOTE(review): the explicit LDR_PC_G0 test looks redundant if that
     value lies inside the ALU_PC_G0_NC..LDC_SB_G2 enum range — harmless
     either way; confirm against bfd reloc enum ordering.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
27032
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return TRUE if the fixup FIXP may be adjusted to be relative to a
   section symbol, FALSE if the original symbol must be preserved.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* No symbol — nothing to preserve.
     (Was "return 1"; use TRUE for consistency with the bfd_boolean
     return type and the FALSE returns below.)  */
  if (fixP->fx_addsy == NULL)
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Keep Thumb function symbols so interworking information survives.  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
27112
27113 #ifdef OBJ_ELF
27114 const char *
27115 elf32_arm_target_format (void)
27116 {
27117 #ifdef TE_SYMBIAN
27118 return (target_big_endian
27119 ? "elf32-bigarm-symbian"
27120 : "elf32-littlearm-symbian");
27121 #elif defined (TE_VXWORKS)
27122 return (target_big_endian
27123 ? "elf32-bigarm-vxworks"
27124 : "elf32-littlearm-vxworks");
27125 #elif defined (TE_NACL)
27126 return (target_big_endian
27127 ? "elf32-bigarm-nacl"
27128 : "elf32-littlearm-nacl");
27129 #else
27130 if (arm_fdpic)
27131 {
27132 if (target_big_endian)
27133 return "elf32-bigarm-fdpic";
27134 else
27135 return "elf32-littlearm-fdpic";
27136 }
27137 else
27138 {
27139 if (target_big_endian)
27140 return "elf32-bigarm";
27141 else
27142 return "elf32-littlearm";
27143 }
27144 #endif
27145 }
27146
/* Per-symbol hook run while writing the symbol table.  ARM has no
   target-specific work to do here; delegate to the generic ELF
   symbol frobbing.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
27153 #endif
27154
27155 /* MD interface: Finalization. */
27156
27157 void
27158 arm_cleanup (void)
27159 {
27160 literal_pool * pool;
27161
27162 /* Ensure that all the predication blocks are properly closed. */
27163 check_pred_blocks_finished ();
27164
27165 for (pool = list_of_pools; pool; pool = pool->next)
27166 {
27167 /* Put it at the end of the relevant section. */
27168 subseg_set (pool->section, pool->sub_section);
27169 #ifdef OBJ_ELF
27170 arm_elf_change_section ();
27171 #endif
27172 s_ltorg (0);
27173 }
27174 }
27175
27176 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain have no mapping symbols to prune.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be redundant.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is shadowed
	 by a later mapping symbol or falls at the end of the section.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
27241 #endif
27242
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is done through storage classes; for ELF
   through st_target_internal branch-type bits or STT_ARM_16BIT.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each plain storage class to
	       its Thumb counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
27324
27325 /* MD interface: Initialization. */
27326
27327 static void
27328 set_constant_flonums (void)
27329 {
27330 int i;
27331
27332 for (i = 0; i < NUM_FLOAT_VALS; i++)
27333 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
27334 abort ();
27335 }
27336
27337 /* Auto-select Thumb mode if it's the only available instruction set for the
27338 given architecture. */
27339
27340 static void
27341 autoselect_thumb_from_cpu_variant (void)
27342 {
27343 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
27344 opcode_select (16);
27345 }
27346
/* MD interface hook: one-time target initialization, called by GAS
   before any input is read.  Builds the lookup hash tables for opcodes,
   condition codes, shifts, PSR names, registers, relocations and
   barrier options; resolves the CPU/FPU selection from the command
   line; sets the object-file private flags; and records the BFD
   machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables.  Running out of memory here is fatal:
     nothing can be assembled without them.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_vcond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the tables from the static description arrays.  Each
     entry is keyed by its template/name string and points back into
     the array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Mixing legacy options with new-style options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  /* Now resolve the FPU, with the same legacy/new-style conflict
     checking as for the CPU above.  */
  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU: fall back to the environment default when a CPU was
     named, otherwise assume FPA.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.  It will be set in
     aeabi_set_public_attributes () after all instructions have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

  /* Compute and record the object-file private flags (COFF) or
     EABI flags (ELF).  */
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  The chain is ordered from most to
     least capable so the first matching feature wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
27586
27587 /* Command line processing. */
27588
27589 /* md_parse_option
27590 Invocation line includes a switch not recognized by the base assembler.
27591 See if it's a processor-specific option.
27592
27593 This routine is somewhat complicated by the need for backwards
27594 compatibility (since older releases of gcc can't be changed).
27595 The new options try to make the interface as compatible as
27596 possible with GCC.
27597
27598 New options (supported) are:
27599
27600 -mcpu=<cpu name> Assemble for selected processor
27601 -march=<architecture name> Assemble for selected architecture
27602 -mfpu=<fpu architecture> Assemble for selected FPU.
27603 -EB/-mbig-endian Big-endian
27604 -EL/-mlittle-endian Little-endian
27605 -k Generate PIC code
27606 -mthumb Start in Thumb mode
27607 -mthumb-interwork Code supports ARM/Thumb interworking
27608
27609 -m[no-]warn-deprecated Warn about deprecated features
27610 -m[no-]warn-syms Warn when symbols match instructions
27611
27612 For now we will also provide support for:
27613
27614 -mapcs-32 32-bit Program counter
27615 -mapcs-26 26-bit Program counter
   -mapcs-float		   Floats passed in FP registers
27617 -mapcs-reentrant Reentrant code
27618 -matpcs
27619 (sometime these will probably be replaced with -mapcs=<list of options>
27620 and -matpcs=<list of options>)
27621
   The remaining options are only supported for backwards compatibility.
27623 Cpu variants, the arm part is optional:
27624 -m[arm]1 Currently not supported.
27625 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27626 -m[arm]3 Arm 3 processor
27627 -m[arm]6[xx], Arm 6 processors
27628 -m[arm]7[xx][t][[d]m] Arm 7 processors
27629 -m[arm]8[10] Arm 8 processors
27630 -m[arm]9[20][tdmi] Arm 9 processors
27631 -mstrongarm[110[0]] StrongARM processors
27632 -mxscale XScale processors
27633 -m[arm]v[2345[t[e]]] Arm architectures
27634 -mall All (except the ARM1)
27635 FP variants:
27636 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27637 -mfpe-old (No float load/store multiples)
27638 -mvfpxd VFP Single precision
27639 -mvfp All VFP
27640 -mno-fpu Disable all floating point instructions
27641
27642 The following CPU names are recognized:
27643 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27644 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27645 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27646 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27647 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27648 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27649 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
27650
27651 */
27652
/* Single-letter options handled by md_parse_option: -m takes an
   argument, -k does not.  */
const char * md_shortopts = "m:k";
27654
/* Option codes for the long-only options below.  OPTION_EB / OPTION_EL
   are only defined when the corresponding endianness is actually
   selectable for this configuration.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)

/* Long options recognized in addition to the -m... options handled via
   arm_opts; dispatched by md_parse_option on the codes above.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
27684
/* Describes a simple command-line option that just stores an integer
   into a variable when seen (see arm_opts below).  */
struct arm_option_table
{
  const char *	option;		/* Option name to match.  */
  const char *	help;		/* Help information.  */
  int *		var;		/* Variable to change.  */
  int		value;		/* What to change it to.  */
  const char *	deprecated;	/* If non-null, print this message.  */
};
27693
27694 struct arm_option_table arm_opts[] =
27695 {
27696 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
27697 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
27698 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27699 &support_interwork, 1, NULL},
27700 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
27701 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
27702 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
27703 1, NULL},
27704 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
27705 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
27706 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
27707 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
27708 NULL},
27709
27710 /* These are recognized by the assembler, but have no affect on code. */
27711 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
27712 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
27713
27714 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
27715 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27716 &warn_on_deprecated, 0, NULL},
27717 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
27718 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
27719 {NULL, NULL, NULL, 0, NULL}
27720 };
27721
/* Describes a deprecated command-line option that selects an
   arm_feature_set (via legacy_cpu / legacy_fpu) rather than a plain
   integer (see arm_legacy_opts below).  */
struct arm_legacy_option_table
{
  const char *		   option;	/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.  */
  const arm_feature_set	   value;	/* What to change it to.  */
  const char *		   deprecated;	/* If non-null, print this message.  */
};
27729
/* Deprecated spellings of -m<cpu>, -m<arch> and FP options.  Matching
   an entry stores VALUE through *VAR (legacy_cpu or legacy_fpu) and
   prints the "use ..." replacement advice as a deprecation warning.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
27842
/* Entry in the -mcpu= lookup table (arm_cpus below).  */
struct arm_cpu_option_table
{
  const char *		 name;
  size_t		 name_len;	/* strlen (name); precomputed by ARM_CPU_OPT.  */
  const arm_feature_set	 value;		/* Architectural feature set of the CPU.  */
  const arm_feature_set	 ext;		/* Extra extension bits this CPU implies.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	 default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *		 canonical_name;
};
27856
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  The macro fills in name_len from the string
   literal N and places the canonical name CN last, matching the field
   order of struct arm_cpu_option_table.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27860
27861 static const struct arm_cpu_option_table arm_cpus[] =
27862 {
27863 ARM_CPU_OPT ("all", NULL, ARM_ANY,
27864 ARM_ARCH_NONE,
27865 FPU_ARCH_FPA),
27866 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
27867 ARM_ARCH_NONE,
27868 FPU_ARCH_FPA),
27869 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
27870 ARM_ARCH_NONE,
27871 FPU_ARCH_FPA),
27872 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
27873 ARM_ARCH_NONE,
27874 FPU_ARCH_FPA),
27875 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
27876 ARM_ARCH_NONE,
27877 FPU_ARCH_FPA),
27878 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
27879 ARM_ARCH_NONE,
27880 FPU_ARCH_FPA),
27881 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
27882 ARM_ARCH_NONE,
27883 FPU_ARCH_FPA),
27884 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
27885 ARM_ARCH_NONE,
27886 FPU_ARCH_FPA),
27887 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
27888 ARM_ARCH_NONE,
27889 FPU_ARCH_FPA),
27890 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
27891 ARM_ARCH_NONE,
27892 FPU_ARCH_FPA),
27893 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
27894 ARM_ARCH_NONE,
27895 FPU_ARCH_FPA),
27896 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
27897 ARM_ARCH_NONE,
27898 FPU_ARCH_FPA),
27899 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
27900 ARM_ARCH_NONE,
27901 FPU_ARCH_FPA),
27902 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
27903 ARM_ARCH_NONE,
27904 FPU_ARCH_FPA),
27905 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
27906 ARM_ARCH_NONE,
27907 FPU_ARCH_FPA),
27908 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
27909 ARM_ARCH_NONE,
27910 FPU_ARCH_FPA),
27911 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
27912 ARM_ARCH_NONE,
27913 FPU_ARCH_FPA),
27914 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
27915 ARM_ARCH_NONE,
27916 FPU_ARCH_FPA),
27917 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
27918 ARM_ARCH_NONE,
27919 FPU_ARCH_FPA),
27920 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
27921 ARM_ARCH_NONE,
27922 FPU_ARCH_FPA),
27923 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
27924 ARM_ARCH_NONE,
27925 FPU_ARCH_FPA),
27926 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
27927 ARM_ARCH_NONE,
27928 FPU_ARCH_FPA),
27929 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
27930 ARM_ARCH_NONE,
27931 FPU_ARCH_FPA),
27932 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
27933 ARM_ARCH_NONE,
27934 FPU_ARCH_FPA),
27935 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
27936 ARM_ARCH_NONE,
27937 FPU_ARCH_FPA),
27938 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
27939 ARM_ARCH_NONE,
27940 FPU_ARCH_FPA),
27941 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
27942 ARM_ARCH_NONE,
27943 FPU_ARCH_FPA),
27944 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
27945 ARM_ARCH_NONE,
27946 FPU_ARCH_FPA),
27947 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
27948 ARM_ARCH_NONE,
27949 FPU_ARCH_FPA),
27950 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
27951 ARM_ARCH_NONE,
27952 FPU_ARCH_FPA),
27953 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
27954 ARM_ARCH_NONE,
27955 FPU_ARCH_FPA),
27956 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
27957 ARM_ARCH_NONE,
27958 FPU_ARCH_FPA),
27959 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
27960 ARM_ARCH_NONE,
27961 FPU_ARCH_FPA),
27962 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
27963 ARM_ARCH_NONE,
27964 FPU_ARCH_FPA),
27965 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
27966 ARM_ARCH_NONE,
27967 FPU_ARCH_FPA),
27968 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
27969 ARM_ARCH_NONE,
27970 FPU_ARCH_FPA),
27971 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
27972 ARM_ARCH_NONE,
27973 FPU_ARCH_FPA),
27974 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
27975 ARM_ARCH_NONE,
27976 FPU_ARCH_FPA),
27977 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
27978 ARM_ARCH_NONE,
27979 FPU_ARCH_FPA),
27980 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
27981 ARM_ARCH_NONE,
27982 FPU_ARCH_FPA),
27983 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
27984 ARM_ARCH_NONE,
27985 FPU_ARCH_FPA),
27986 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
27987 ARM_ARCH_NONE,
27988 FPU_ARCH_FPA),
27989 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
27990 ARM_ARCH_NONE,
27991 FPU_ARCH_FPA),
27992 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
27993 ARM_ARCH_NONE,
27994 FPU_ARCH_FPA),
27995 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
27996 ARM_ARCH_NONE,
27997 FPU_ARCH_FPA),
27998 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
27999 ARM_ARCH_NONE,
28000 FPU_ARCH_FPA),
28001
28002 /* For V5 or later processors we default to using VFP; but the user
28003 should really set the FPU type explicitly. */
28004 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
28005 ARM_ARCH_NONE,
28006 FPU_ARCH_VFP_V2),
28007 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
28008 ARM_ARCH_NONE,
28009 FPU_ARCH_VFP_V2),
28010 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
28011 ARM_ARCH_NONE,
28012 FPU_ARCH_VFP_V2),
28013 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
28014 ARM_ARCH_NONE,
28015 FPU_ARCH_VFP_V2),
28016 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
28017 ARM_ARCH_NONE,
28018 FPU_ARCH_VFP_V2),
28019 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
28020 ARM_ARCH_NONE,
28021 FPU_ARCH_VFP_V2),
28022 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
28023 ARM_ARCH_NONE,
28024 FPU_ARCH_VFP_V2),
28025 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
28026 ARM_ARCH_NONE,
28027 FPU_ARCH_VFP_V2),
28028 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
28029 ARM_ARCH_NONE,
28030 FPU_ARCH_VFP_V2),
28031 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
28032 ARM_ARCH_NONE,
28033 FPU_ARCH_VFP_V2),
28034 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
28035 ARM_ARCH_NONE,
28036 FPU_ARCH_VFP_V2),
28037 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
28038 ARM_ARCH_NONE,
28039 FPU_ARCH_VFP_V2),
28040 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
28041 ARM_ARCH_NONE,
28042 FPU_ARCH_VFP_V1),
28043 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
28044 ARM_ARCH_NONE,
28045 FPU_ARCH_VFP_V1),
28046 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
28047 ARM_ARCH_NONE,
28048 FPU_ARCH_VFP_V2),
28049 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
28050 ARM_ARCH_NONE,
28051 FPU_ARCH_VFP_V2),
28052 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
28053 ARM_ARCH_NONE,
28054 FPU_ARCH_VFP_V1),
28055 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
28056 ARM_ARCH_NONE,
28057 FPU_ARCH_VFP_V2),
28058 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
28059 ARM_ARCH_NONE,
28060 FPU_ARCH_VFP_V2),
28061 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
28062 ARM_ARCH_NONE,
28063 FPU_ARCH_VFP_V2),
28064 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
28065 ARM_ARCH_NONE,
28066 FPU_ARCH_VFP_V2),
28067 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
28068 ARM_ARCH_NONE,
28069 FPU_ARCH_VFP_V2),
28070 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
28071 ARM_ARCH_NONE,
28072 FPU_ARCH_VFP_V2),
28073 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
28074 ARM_ARCH_NONE,
28075 FPU_ARCH_VFP_V2),
28076 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
28077 ARM_ARCH_NONE,
28078 FPU_ARCH_VFP_V2),
28079 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
28080 ARM_ARCH_NONE,
28081 FPU_ARCH_VFP_V2),
28082 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
28083 ARM_ARCH_NONE,
28084 FPU_NONE),
28085 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
28086 ARM_ARCH_NONE,
28087 FPU_NONE),
28088 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
28089 ARM_ARCH_NONE,
28090 FPU_ARCH_VFP_V2),
28091 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
28092 ARM_ARCH_NONE,
28093 FPU_ARCH_VFP_V2),
28094 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
28095 ARM_ARCH_NONE,
28096 FPU_ARCH_VFP_V2),
28097 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
28098 ARM_ARCH_NONE,
28099 FPU_NONE),
28100 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
28101 ARM_ARCH_NONE,
28102 FPU_NONE),
28103 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
28104 ARM_ARCH_NONE,
28105 FPU_ARCH_VFP_V2),
28106 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
28107 ARM_ARCH_NONE,
28108 FPU_NONE),
28109 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
28110 ARM_ARCH_NONE,
28111 FPU_ARCH_VFP_V2),
28112 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
28113 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28114 FPU_NONE),
28115 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
28116 ARM_ARCH_NONE,
28117 FPU_ARCH_NEON_VFP_V4),
28118 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
28119 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
28120 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
28121 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
28122 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28123 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
28124 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
28125 ARM_ARCH_NONE,
28126 FPU_ARCH_NEON_VFP_V4),
28127 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
28128 ARM_ARCH_NONE,
28129 FPU_ARCH_NEON_VFP_V4),
28130 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
28131 ARM_ARCH_NONE,
28132 FPU_ARCH_NEON_VFP_V4),
28133 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
28134 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28135 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28136 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
28137 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28138 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28139 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
28140 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28141 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28142 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
28143 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28144 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28145 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
28146 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28147 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28148 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
28149 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28150 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28151 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
28152 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28153 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28154 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
28155 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28156 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28157 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
28158 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28159 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28160 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
28161 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28162 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28163 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
28164 ARM_ARCH_NONE,
28165 FPU_NONE),
28166 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
28167 ARM_ARCH_NONE,
28168 FPU_ARCH_VFP_V3D16),
28169 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
28170 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28171 FPU_NONE),
28172 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
28173 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28174 FPU_ARCH_VFP_V3D16),
28175 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
28176 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
28177 FPU_ARCH_VFP_V3D16),
28178 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
28179 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28180 FPU_ARCH_NEON_VFP_ARMV8),
28181 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
28182 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
28183 FPU_NONE),
28184 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
28185 ARM_ARCH_NONE,
28186 FPU_NONE),
28187 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
28188 ARM_ARCH_NONE,
28189 FPU_NONE),
28190 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
28191 ARM_ARCH_NONE,
28192 FPU_NONE),
28193 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
28194 ARM_ARCH_NONE,
28195 FPU_NONE),
28196 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
28197 ARM_ARCH_NONE,
28198 FPU_NONE),
28199 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
28200 ARM_ARCH_NONE,
28201 FPU_NONE),
28202 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
28203 ARM_ARCH_NONE,
28204 FPU_NONE),
28205 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
28206 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28207 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28208 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
28209 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
28210 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
28211 /* ??? XSCALE is really an architecture. */
28212 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
28213 ARM_ARCH_NONE,
28214 FPU_ARCH_VFP_V2),
28215
28216 /* ??? iwmmxt is not a processor. */
28217 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
28218 ARM_ARCH_NONE,
28219 FPU_ARCH_VFP_V2),
28220 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
28221 ARM_ARCH_NONE,
28222 FPU_ARCH_VFP_V2),
28223 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
28224 ARM_ARCH_NONE,
28225 FPU_ARCH_VFP_V2),
28226
28227 /* Maverick. */
28228 ARM_CPU_OPT ("ep9312", "ARM920T",
28229 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
28230 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
28231
28232 /* Marvell processors. */
28233 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
28234 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28235 FPU_ARCH_VFP_V3D16),
28236 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
28237 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
28238 FPU_ARCH_NEON_VFP_V4),
28239
28240 /* APM X-Gene family. */
28241 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
28242 ARM_ARCH_NONE,
28243 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28244 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
28245 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
28246 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
28247
28248 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
28249 };
28250 #undef ARM_CPU_OPT
28251
/* One entry of an architecture-specific extension table: the feature
   bits that "+NAME" merges in and that "+noNAME" clears.  A zero
   MERGE (resp. CLEAR) set means the add (resp. remove) form is not
   supported for that architecture.  */
struct arm_ext_table
{
  const char *		  name;		/* Extension name, without the '+'.  */
  size_t		  name_len;	/* Precomputed strlen (name).  */
  const arm_feature_set	  merge;	/* Bits added by +name.  */
  const arm_feature_set	  clear;	/* Bits removed by +noname.  */
};
28259
/* One -march= option: architecture name, its core feature bits, the
   FPU assumed when none is given explicitly, and (optionally) the
   context-sensitive extension table searched for "+ext" suffixes.  */
struct arm_arch_option_table
{
  const char *			name;		/* Architecture name.  */
  size_t			name_len;	/* Precomputed strlen (name).  */
  const arm_feature_set		value;		/* Core feature bits.  */
  const arm_feature_set		default_fpu;	/* FPU when -mfpu= absent.  */
  /* Per-architecture "+ext" table, or NULL to use only the legacy
     global arm_extensions table.  */
  const struct arm_ext_table *	ext_table;
};
28268
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension (no +noE form).  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension (no +E form).  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Feature set covering every FP-related bit, used as the CLEAR set of
   "+nofp" style entries.  FPU_ENDIAN_PURE is excluded because it is an
   ABI property, not an FP capability.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
28278
/* Extensions accepted after -march=armv5te and the other pre-ARMv7
   architectures that share this table.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28284
/* Extensions accepted after -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28290
/* Extensions accepted after -march=armv7ve.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28313
/* Extensions accepted after -march=armv7-a (and armv7a).  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  /* Core (non-FP) extensions: multiprocessing and security.  */
  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28338
/* Extensions accepted after -march=armv7-r (and armv7r).  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),		/* Single-precision only.  */
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  /* Both the ARM- and Thumb-state divide instructions.  */
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28351
/* Extensions accepted after -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),	/* Double-precision variant.  */
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28362
/* Extensions accepted after -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28377
28378
/* Extensions accepted after -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8.1-a, like Armv8-a, does not allow an FP implementation without
     SIMD, so the user should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28392
/* Extensions accepted after -march=armv8.2-a (also used for armv8.3-a).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28409
/* Extensions accepted after -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28424
/* Extensions accepted after -march=armv8.5-a.  Note +sb and +predres are
   mandatory from 8.5 onwards and so do not appear here.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28437
/* Extensions accepted after -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),	/* Double-precision variant.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28446
/* Extensions accepted after -march=armv8.1-m.main, including the M-profile
   Vector Extension (MVE) in its integer-only (+mve) and floating-point
   (+mve.fp) forms.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  /* Scalar FP also brings in the FP16 scalar instructions.  */
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* +nomve removes the FP part of MVE too.  */
  ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
	   ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
  /* +mve.fp implies scalar single-precision FP as well.  */
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
			FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28466
/* Extensions accepted after -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),	/* Single-precision only.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
28477
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Entry without a context-sensitive extension table.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* Entry whose "+ext" suffixes are resolved via EXT_ext_table first.  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28483
/* Table of all architectures accepted by -march=, searched linearly by
   arm_parse_arch.  Terminated by a NULL-name sentinel.  */
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		  ARM_ANY,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	  ARM_ARCH_V1,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	  ARM_ARCH_V2,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	  ARM_ARCH_V3,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	  ARM_ARCH_V3M,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	  ARM_ARCH_V4,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	  ARM_ARCH_V4xM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	  ARM_ARCH_V4T,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	  ARM_ARCH_V4TxM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	  ARM_ARCH_V5,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	  ARM_ARCH_V5T,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	  ARM_ARCH_V5TxM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te",	  ARM_ARCH_V5TE,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv5texp",	  ARM_ARCH_V5TExP,	FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej",	  ARM_ARCH_V5TEJ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6j",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6k",	  ARM_ARCH_V6K,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6z",	  ARM_ARCH_V6Z,		FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zk",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6t2",	  ARM_ARCH_V6T2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6kt2",	  ARM_ARCH_V6KT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zt2",	  ARM_ARCH_V6ZT2,	FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT ("armv6-m",	  ARM_ARCH_V6M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	  ARM_ARCH_V6SM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7",	  ARM_ARCH_V7,		FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve",	  ARM_ARCH_V7VE,	FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m",	  ARM_ARCH_V7EM,	FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base",	  ARM_ARCH_V8M_BASE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main",  ARM_ARCH_V8M_MAIN,	FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a",	  ARM_ARCH_V8A,		FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a",	  ARM_ARCH_V8_1A,	FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a",	  ARM_ARCH_V8_2A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8.3-a",	  ARM_ARCH_V8_3A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r",	  ARM_ARCH_V8R,		FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a",	  ARM_ARCH_V8_4A,	FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a",	  ARM_ARCH_V8_5A,	FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT ("xscale",	  ARM_ARCH_XSCALE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	  ARM_ARCH_IWMMXT,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	  ARM_ARCH_IWMMXT2,	FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }	/* Sentinel.  */
};
28548 #undef ARM_ARCH_OPT
28549
/* ISA extensions in the co-processor and main instruction set space.  */

/* One entry of the legacy global extension table: feature bits to merge
   for "+name" / clear for "+noname", plus the architectures the
   extension may be applied to.  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, without '+'.  */
  size_t name_len;			/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Bits added by +name.  */
  const arm_feature_set clear_value;	/* Bits removed by +noname.  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
28563
/* The following table must be in alphabetical order with a NULL last entry.  */

/* Entry with one (or two) allowed base architectures.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28568
/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
/* Legacy global "+ext" table, consulted when an architecture has no
   context-sensitive table (or that table has no match).  Kept in
   alphabetical order -- arm_parse_extension depends on it.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
28648 #undef ARM_EXT_OPT
28649
/* ISA floating-point and Advanced SIMD extensions.  */
/* One -mfpu= option: name and the FPU feature bits it selects.  */
struct arm_option_fpu_value_table
{
  const char * name;			/* FPU name as given to -mfpu=.  */
  const arm_feature_set value;		/* Corresponding feature bits.  */
};
28656
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Searched linearly (exact match) by
   arm_parse_fpu; terminated by a NULL-name sentinel.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}	/* Sentinel.  */
};
28707
/* Generic name -> integer value mapping, used for -mfloat-abi= and
   -meabi= option tables.  */
struct arm_option_value_table
{
  const char *name;	/* Option argument text.  */
  long value;		/* Value recorded when the name matches.  */
};
28713
/* Arguments accepted by -mfloat-abi=, searched by arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}	/* Sentinel.  */
};
28721
28722 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Arguments accepted by -meabi=, searched by arm_parse_eabi.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}	/* Sentinel.  */
};
28731 #endif
28732
/* Description of one long command-line option with a sub-option
   argument (e.g. -mcpu=NAME), dispatched to a parsing callback.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
28740
/* Parse a "+ext1+ext2..." architectural-extension suffix string STR and
   apply each extension to *EXT_SET.  OPT_SET holds the feature bits of
   the base CPU/architecture and is used only to validate that an
   extension is permitted there.  EXT_TABLE, if non-NULL, is the
   architecture's context-sensitive extension table and is searched
   before the deprecated global ARM_EXTENSIONS table.  Returns TRUE on
   success; on any error a diagnostic is issued and FALSE returned.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this extension name only.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A leading "no" switches to removal mode; once removing, we may
	 never return to adding.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* The architecture-specific table takes precedence over the
	 legacy global table below.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
28908
28909 static bfd_boolean
28910 arm_parse_cpu (const char *str)
28911 {
28912 const struct arm_cpu_option_table *opt;
28913 const char *ext = strchr (str, '+');
28914 size_t len;
28915
28916 if (ext != NULL)
28917 len = ext - str;
28918 else
28919 len = strlen (str);
28920
28921 if (len == 0)
28922 {
28923 as_bad (_("missing cpu name `%s'"), str);
28924 return FALSE;
28925 }
28926
28927 for (opt = arm_cpus; opt->name != NULL; opt++)
28928 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28929 {
28930 mcpu_cpu_opt = &opt->value;
28931 if (mcpu_ext_opt == NULL)
28932 mcpu_ext_opt = XNEW (arm_feature_set);
28933 *mcpu_ext_opt = opt->ext;
28934 mcpu_fpu_opt = &opt->default_fpu;
28935 if (opt->canonical_name)
28936 {
28937 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
28938 strcpy (selected_cpu_name, opt->canonical_name);
28939 }
28940 else
28941 {
28942 size_t i;
28943
28944 if (len >= sizeof selected_cpu_name)
28945 len = (sizeof selected_cpu_name) - 1;
28946
28947 for (i = 0; i < len; i++)
28948 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28949 selected_cpu_name[i] = 0;
28950 }
28951
28952 if (ext != NULL)
28953 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
28954
28955 return TRUE;
28956 }
28957
28958 as_bad (_("unknown cpu `%s'"), str);
28959 return FALSE;
28960 }
28961
28962 static bfd_boolean
28963 arm_parse_arch (const char *str)
28964 {
28965 const struct arm_arch_option_table *opt;
28966 const char *ext = strchr (str, '+');
28967 size_t len;
28968
28969 if (ext != NULL)
28970 len = ext - str;
28971 else
28972 len = strlen (str);
28973
28974 if (len == 0)
28975 {
28976 as_bad (_("missing architecture name `%s'"), str);
28977 return FALSE;
28978 }
28979
28980 for (opt = arm_archs; opt->name != NULL; opt++)
28981 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
28982 {
28983 march_cpu_opt = &opt->value;
28984 if (march_ext_opt == NULL)
28985 march_ext_opt = XNEW (arm_feature_set);
28986 *march_ext_opt = arm_arch_none;
28987 march_fpu_opt = &opt->default_fpu;
28988 strcpy (selected_cpu_name, opt->name);
28989
28990 if (ext != NULL)
28991 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
28992 opt->ext_table);
28993
28994 return TRUE;
28995 }
28996
28997 as_bad (_("unknown architecture `%s'\n"), str);
28998 return FALSE;
28999 }
29000
29001 static bfd_boolean
29002 arm_parse_fpu (const char * str)
29003 {
29004 const struct arm_option_fpu_value_table * opt;
29005
29006 for (opt = arm_fpus; opt->name != NULL; opt++)
29007 if (streq (opt->name, str))
29008 {
29009 mfpu_opt = &opt->value;
29010 return TRUE;
29011 }
29012
29013 as_bad (_("unknown floating point format `%s'\n"), str);
29014 return FALSE;
29015 }
29016
29017 static bfd_boolean
29018 arm_parse_float_abi (const char * str)
29019 {
29020 const struct arm_option_value_table * opt;
29021
29022 for (opt = arm_float_abis; opt->name != NULL; opt++)
29023 if (streq (opt->name, str))
29024 {
29025 mfloat_abi_opt = opt->value;
29026 return TRUE;
29027 }
29028
29029 as_bad (_("unknown floating point abi `%s'\n"), str);
29030 return FALSE;
29031 }
29032
29033 #ifdef OBJ_ELF
29034 static bfd_boolean
29035 arm_parse_eabi (const char * str)
29036 {
29037 const struct arm_option_value_table *opt;
29038
29039 for (opt = arm_eabis; opt->name != NULL; opt++)
29040 if (streq (opt->name, str))
29041 {
29042 meabi_flags = opt->value;
29043 return TRUE;
29044 }
29045 as_bad (_("unknown EABI `%s'\n"), str);
29046 return FALSE;
29047 }
29048 #endif
29049
29050 static bfd_boolean
29051 arm_parse_it_mode (const char * str)
29052 {
29053 bfd_boolean ret = TRUE;
29054
29055 if (streq ("arm", str))
29056 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
29057 else if (streq ("thumb", str))
29058 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
29059 else if (streq ("always", str))
29060 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
29061 else if (streq ("never", str))
29062 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
29063 else
29064 {
29065 as_bad (_("unknown implicit IT mode `%s', should be "\
29066 "arm, thumb, always, or never."), str);
29067 ret = FALSE;
29068 }
29069
29070 return ret;
29071 }
29072
29073 static bfd_boolean
29074 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
29075 {
29076 codecomposer_syntax = TRUE;
29077 arm_comment_chars[0] = ';';
29078 arm_line_separator_chars[0] = 0;
29079 return TRUE;
29080 }
29081
/* Table of long command-line options ("-mcpu=", "-march=", ...), each with
   its help text and the parser callback that consumes the option argument.
   Scanned by md_parse_option and printed by md_show_usage; terminated by a
   NULL entry.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
29102
/* Target hook: handle one command line option.  C is the option character
   and ARG its argument (NULL when the option takes none).  The fixed cases
   are tried first, then the short-option, legacy-option and long-option
   tables in turn.  Returns non-zero if the option was consumed.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple options: exact match on the option character plus the
	 remainder of the option name in ARG.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above but the stored value is a pointer
	 to a feature set rather than an integer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" skips the option
		 character C, which is part of lopt->option but not of ARG.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
29199
/* Target hook: print the ARM-specific option summary for "as --help" to
   stream FP.  Options with a NULL help string are intentionally hidden.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
29234
29235 #ifdef OBJ_ELF
29236
/* One row of the CPU-feature to EABI Tag_CPU_arch mapping table.  */
typedef struct
{
  int val;		  /* Tag_CPU_arch build attribute value.  */
  arm_feature_set flags;  /* Feature set of that architecture.  */
} cpu_arch_ver_table;
29242
29243 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
29244 chronologically for architectures, with an exception for ARMv6-M and
29245 ARMv6S-M due to legacy reasons. No new architecture should have a
29246 special case. This allows for build attribute selection results to be
29247 stable when new architectures are added. */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    /* Terminator: scanned-for sentinel, must stay last.  */
    {-1,		      ARM_ARCH_NONE}
};
29303
29304 /* Set an attribute if it has not already been set by the user. */
29305
/* Set integer build attribute TAG to VALUE, unless the user has already set
   that attribute explicitly (e.g. via a .eabi_attribute directive).  Tags
   outside the known range are always emitted since no explicit-set record
   exists for them.  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}
29314
29315 static void
29316 aeabi_set_attribute_string (int tag, const char *value)
29317 {
29318 if (tag < 1
29319 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
29320 || !attributes_set_explicitly[tag])
29321 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
29322 }
29323
29324 /* Return whether features in the *NEEDED feature set are available via
29325 extensions for the architecture whose feature set is *ARCH_FSET. */
29326
static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature bit that some extension valid for
     *ARCH_FSET could turn on.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
29359
29360 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
29361 a given architecture feature set *ARCH_EXT_FSET including extension feature
29362 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
29363 - if true, check for an exact match of the architecture modulo extensions;
29364 - otherwise, select build attribute value of the first superset
29365 architecture released so that results remains stable when new architectures
29366 are added.
29367 For -march/-mcpu=all the build attribute value of the most featureful
29368 architecture is returned. Tag_CPU_arch_profile result is returned in
29369 PROFILE. */
29370
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture feature set without the extensions.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Scan in table (i.e. chronological) order so the first acceptable hit is
     also the oldest suitable architecture, keeping results stable when new
     architectures are appended.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  /* No architecture (even with extensions) covers the needed features.  */
  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
29471
29472 /* Set the public EABI object attributes. */
29473
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based on the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For "armv..." names, report the upper-cased version suffix only.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
29682
29683 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29684 finished and free extension feature bits which will not be used anymore. */
29685
29686 void
29687 arm_md_post_relax (void)
29688 {
29689 aeabi_set_public_attributes ();
29690 XDELETE (mcpu_ext_opt);
29691 mcpu_ext_opt = NULL;
29692 XDELETE (march_ext_opt);
29693 march_ext_opt = NULL;
29694 }
29695
29696 /* Add the default contents for the .ARM.attributes section. */
29697
29698 void
29699 arm_md_end (void)
29700 {
29701 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
29702 return;
29703
29704 aeabi_set_public_attributes ();
29705 }
29706 #endif /* OBJ_ELF */
29707
29708 /* Parse a .cpu directive. */
29709
29710 static void
29711 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
29712 {
29713 const struct arm_cpu_option_table *opt;
29714 char *name;
29715 char saved_char;
29716
29717 name = input_line_pointer;
29718 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29719 input_line_pointer++;
29720 saved_char = *input_line_pointer;
29721 *input_line_pointer = 0;
29722
29723 /* Skip the first "all" entry. */
29724 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
29725 if (streq (opt->name, name))
29726 {
29727 selected_arch = opt->value;
29728 selected_ext = opt->ext;
29729 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
29730 if (opt->canonical_name)
29731 strcpy (selected_cpu_name, opt->canonical_name);
29732 else
29733 {
29734 int i;
29735 for (i = 0; opt->name[i]; i++)
29736 selected_cpu_name[i] = TOUPPER (opt->name[i]);
29737
29738 selected_cpu_name[i] = 0;
29739 }
29740 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29741
29742 *input_line_pointer = saved_char;
29743 demand_empty_rest_of_line ();
29744 return;
29745 }
29746 as_bad (_("unknown cpu `%s'"), name);
29747 *input_line_pointer = saved_char;
29748 ignore_rest_of_line ();
29749 }
29750
29751 /* Parse a .arch directive. */
29752
29753 static void
29754 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
29755 {
29756 const struct arm_arch_option_table *opt;
29757 char saved_char;
29758 char *name;
29759
29760 name = input_line_pointer;
29761 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29762 input_line_pointer++;
29763 saved_char = *input_line_pointer;
29764 *input_line_pointer = 0;
29765
29766 /* Skip the first "all" entry. */
29767 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29768 if (streq (opt->name, name))
29769 {
29770 selected_arch = opt->value;
29771 selected_ext = arm_arch_none;
29772 selected_cpu = selected_arch;
29773 strcpy (selected_cpu_name, opt->name);
29774 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29775 *input_line_pointer = saved_char;
29776 demand_empty_rest_of_line ();
29777 return;
29778 }
29779
29780 as_bad (_("unknown architecture `%s'\n"), name);
29781 *input_line_pointer = saved_char;
29782 ignore_rest_of_line ();
29783 }
29784
29785 /* Parse a .object_arch directive. */
29786
29787 static void
29788 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
29789 {
29790 const struct arm_arch_option_table *opt;
29791 char saved_char;
29792 char *name;
29793
29794 name = input_line_pointer;
29795 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29796 input_line_pointer++;
29797 saved_char = *input_line_pointer;
29798 *input_line_pointer = 0;
29799
29800 /* Skip the first "all" entry. */
29801 for (opt = arm_archs + 1; opt->name != NULL; opt++)
29802 if (streq (opt->name, name))
29803 {
29804 selected_object_arch = opt->value;
29805 *input_line_pointer = saved_char;
29806 demand_empty_rest_of_line ();
29807 return;
29808 }
29809
29810 as_bad (_("unknown architecture `%s'\n"), name);
29811 *input_line_pointer = saved_char;
29812 ignore_rest_of_line ();
29813 }
29814
29815 /* Parse a .arch_extension directive. */
29816
29817 static void
29818 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
29819 {
29820 const struct arm_option_extension_value_table *opt;
29821 char saved_char;
29822 char *name;
29823 int adding_value = 1;
29824
29825 name = input_line_pointer;
29826 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29827 input_line_pointer++;
29828 saved_char = *input_line_pointer;
29829 *input_line_pointer = 0;
29830
29831 if (strlen (name) >= 2
29832 && strncmp (name, "no", 2) == 0)
29833 {
29834 adding_value = 0;
29835 name += 2;
29836 }
29837
29838 for (opt = arm_extensions; opt->name != NULL; opt++)
29839 if (streq (opt->name, name))
29840 {
29841 int i, nb_allowed_archs =
29842 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
29843 for (i = 0; i < nb_allowed_archs; i++)
29844 {
29845 /* Empty entry. */
29846 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
29847 continue;
29848 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
29849 break;
29850 }
29851
29852 if (i == nb_allowed_archs)
29853 {
29854 as_bad (_("architectural extension `%s' is not allowed for the "
29855 "current base architecture"), name);
29856 break;
29857 }
29858
29859 if (adding_value)
29860 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
29861 opt->merge_value);
29862 else
29863 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
29864
29865 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
29866 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29867 *input_line_pointer = saved_char;
29868 demand_empty_rest_of_line ();
29869 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29870 on this return so that duplicate extensions (extensions with the
29871 same name as a previous extension in the list) are not considered
29872 for command-line parsing. */
29873 return;
29874 }
29875
29876 if (opt->name == NULL)
29877 as_bad (_("unknown architecture extension `%s'\n"), name);
29878
29879 *input_line_pointer = saved_char;
29880 ignore_rest_of_line ();
29881 }
29882
29883 /* Parse a .fpu directive. */
29884
29885 static void
29886 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
29887 {
29888 const struct arm_option_fpu_value_table *opt;
29889 char saved_char;
29890 char *name;
29891
29892 name = input_line_pointer;
29893 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
29894 input_line_pointer++;
29895 saved_char = *input_line_pointer;
29896 *input_line_pointer = 0;
29897
29898 for (opt = arm_fpus; opt->name != NULL; opt++)
29899 if (streq (opt->name, name))
29900 {
29901 selected_fpu = opt->value;
29902 #ifndef CPU_DEFAULT
29903 if (no_cpu_selected ())
29904 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
29905 else
29906 #endif
29907 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
29908 *input_line_pointer = saved_char;
29909 demand_empty_rest_of_line ();
29910 return;
29911 }
29912
29913 as_bad (_("unknown floating point format `%s'\n"), name);
29914 *input_line_pointer = saved_char;
29915 ignore_rest_of_line ();
29916 }
29917
/* Copy the ARM-specific symbol flag (Thumb/ARM function marking) from
   symbol SRC to symbol DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
29925
29926 #ifdef OBJ_ELF
29927 /* Given a symbolic attribute NAME, return the proper integer value.
29928 Returns -1 if the attribute is not known. */
29929
int
arm_convert_symbolic_attribute (const char *name)
{
  /* Static lookup table mapping attribute names to their EABI tag values.  */
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      T (Tag_MVE_arch),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search; the table is small and this only runs while parsing a
     .eabi_attribute directive.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
30002
30003 /* Apply sym value for relocations only in the case that they are for
30004 local symbols in the same segment as the fixup and you have the
30005 respective architectural feature for blx and simple switches. */
30006
30007 int
30008 arm_apply_sym_value (struct fix * fixP, segT this_seg)
30009 {
30010 if (fixP->fx_addsy
30011 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
30012 /* PR 17444: If the local symbol is in a different section then a reloc
30013 will always be generated for it, so applying the symbol value now
30014 will result in a double offset being stored in the relocation. */
30015 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
30016 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
30017 {
30018 switch (fixP->fx_r_type)
30019 {
30020 case BFD_RELOC_ARM_PCREL_BLX:
30021 case BFD_RELOC_THUMB_PCREL_BRANCH23:
30022 if (ARM_IS_FUNC (fixP->fx_addsy))
30023 return 1;
30024 break;
30025
30026 case BFD_RELOC_ARM_PCREL_CALL:
30027 case BFD_RELOC_THUMB_PCREL_BLX:
30028 if (THUMB_IS_FUNC (fixP->fx_addsy))
30029 return 1;
30030 break;
30031
30032 default:
30033 break;
30034 }
30035
30036 }
30037 return 0;
30038 }
30039 #endif /* OBJ_ELF */
This page took 0.936564 seconds and 4 git commands to generate.