1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
/* Allocation chunk size for the buffer of unwind opcodes.  Must be at
   least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
/* Non-zero when strings A and B are identical.  */
#define streq(a, b) (strcmp (a, b) == 0)
/* Current set of feature bits available (CPU+FPU).  Different from
   selected_cpu + selected_fpu in case of autodetection since the CPU
   feature bits are then all set.  */
static arm_feature_set cpu_variant;

/* Feature bits used in each execution state.  Used to set build attribute
   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;
/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */

/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
   instead of -mcpu=arm1).  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* CPU, extension and FPU feature bits selected by -mcpu.  Note that the
   extension set is non-const (unlike its siblings) so it can be updated
   in place after initial parsing.  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static arm_feature_set *mcpu_ext_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;

/* CPU, extension and FPU feature bits selected by -march.  */
static const arm_feature_set *march_cpu_opt = NULL;
static arm_feature_set *march_ext_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;

/* Feature bits selected by -mfpu.  */
static const arm_feature_set *mfpu_opt = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

/* NOTE(review): the numbering embedded in this extract shows lines were
   lost immediately around the next declaration; the original may have
   guarded it with preprocessor conditionals (e.g. #ifdef CPU_DEFAULT) --
   verify against the full source.  */
static const arm_feature_set cpu_default = CPU_DEFAULT;
/* Per-extension feature sets.  Each one names the architecture
   extension(s) that gate a particular group of instructions.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
static const arm_feature_set arm_ext_v6k_v6t2 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v8_1m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_fp16_fml =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
static const arm_feature_set arm_ext_v8_2 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
static const arm_feature_set arm_ext_sb =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
static const arm_feature_set arm_ext_predres =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
/* Catch-all and architecture-wide feature sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set fpu_any = FPU_ANY;
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

/* Coprocessor extension feature sets.  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);

/* FPA, VFP, Neon and MVE feature sets.  */
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set mve_ext =
  ARM_FEATURE_COPROC (FPU_MVE);
static const arm_feature_set mve_fp_ext =
  ARM_FEATURE_COPROC (FPU_MVE_FP);
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
/* Floating-point ABI option value (-1 = not given).  NOTE(review):
   presumably set from -mfloat-abi; the parsing code is not visible in
   this extract -- verify against md_parse_option.  */
static int mfloat_abi_opt = -1;

/* Architecture feature bits selected by the last -mcpu/-march or
   .cpu/.arch directive.  */
static arm_feature_set selected_arch = ARM_ARCH_NONE;

/* Extension feature bits selected by the last -mcpu/-march or
   .arch_extension directive.  */
static arm_feature_set selected_ext = ARM_ARCH_NONE;

/* Feature bits selected by the last -mcpu/-march or by the combination of the
   last .cpu/.arch directive and .arch_extension directives since then.
   NOTE(review): the original comment's tail was lost in extraction.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;

/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
static arm_feature_set selected_fpu = FPU_NONE;

/* Feature bits selected by the last .object_arch directive.  */
static arm_feature_set selected_object_arch = ARM_ARCH_NONE;

/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

extern FLONUM_TYPE generic_floating_point_number;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
366 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
symbolS * GOT_symbol;

/* Current instruction-set state:
     0: assemble for ARM,
     1: assemble for Thumb,
     2: assemble for Thumb even though target CPU does not support Thumb
	instructions.  */
static int thumb_mode = 0;

/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
398 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
/* If unified_syntax is TRUE, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum pred_instruction_type
462 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN
, /* The IT insn has been parsed. */
467 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN
, /* Instruction to indicate a MVE instruction without
469 a predication code. */
470 MVE_UNPREDICABLE_INSN
/* MVE instruction that is non-predicable. */
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
/* The maximum number of relocations recorded per instruction (sizes the
   relocs[] array in the parsed-instruction state).  */
#define ARM_IT_MAX_RELOCS 3
480 unsigned long instruction
;
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
488 struct neon_type vectype
;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
497 bfd_reloc_code_real_type type
;
500 } relocs
[ARM_IT_MAX_RELOCS
];
502 enum pred_instruction_type pred_insn_type
;
508 struct neon_type_el vectype
;
509 unsigned present
: 1; /* Operand present. */
510 unsigned isreg
: 1; /* Operand was a register. */
511 unsigned immisreg
: 1; /* .imm field is a second register. */
512 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
513 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
514 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
515 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
516 instructions. This allows us to disambiguate ARM <-> vector insns. */
517 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
518 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
519 unsigned isquad
: 1; /* Operand is SIMD quad register. */
520 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
521 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
522 unsigned writeback
: 1; /* Operand has trailing ! */
523 unsigned preind
: 1; /* Preindexed address. */
524 unsigned postind
: 1; /* Postindexed address. */
525 unsigned negative
: 1; /* Index register was negated. */
526 unsigned shifted
: 1; /* Shift applied to operation. */
527 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
528 } operands
[ARM_IT_MAX_OPERANDS
];
531 static struct arm_it inst
;
533 #define NUM_FLOAT_VALS 8
535 const char * fp_const
[] =
537 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
540 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
/* Encoding bit masks used by the instruction encoders.  */
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

/* N.B. CONDS_BIT and LOAD_BIT are two names for the same bit
   (0x00100000); the appropriate name is used per instruction class.  */
#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
560 const char * template_name
;
564 #define COND_ALWAYS 0xE
568 const char * template_name
;
572 struct asm_barrier_opt
574 const char * template_name
;
576 const arm_feature_set arch
;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
591 bfd_reloc_code_real_type reloc
;
596 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
597 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
602 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2
609 struct neon_typed_alias
611 unsigned char defined
;
613 struct neon_type_el eltype
;
616 /* ARM register categories. This includes coprocessor numbers and various
617 architecture extensions' registers. Each entry should have an error message
618 in reg_expected_msgs below. */
647 /* Structure for a hash table entry for a register.
648 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
649 information which states whether a vector type or index is specified (for a
650 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
656 unsigned char builtin
;
657 struct neon_typed_alias
* neon
;
660 /* Diagnostics used when we don't get a register of the expected type. */
661 const char * const reg_expected_msgs
[] =
663 [REG_TYPE_RN
] = N_("ARM register expected"),
664 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
665 [REG_TYPE_CN
] = N_("co-processor register expected"),
666 [REG_TYPE_FN
] = N_("FPA register expected"),
667 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
668 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
669 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
670 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
671 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
672 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
673 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
675 [REG_TYPE_VFC
] = N_("VFP system register expected"),
676 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
677 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
678 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
679 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
680 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
681 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
682 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
683 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
684 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
685 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
686 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
687 [REG_TYPE_RNB
] = N_("")
690 /* Some well known registers that we refer to directly elsewhere. */
696 /* ARM instructions take 4bytes in the object file, Thumb instructions
702 /* Basic string to match. */
703 const char * template_name
;
705 /* Parameters to instruction. */
706 unsigned int operands
[8];
708 /* Conditional tag - see opcode_lookup. */
709 unsigned int tag
: 4;
711 /* Basic instruction code. */
714 /* Thumb-format instruction code. */
717 /* Which architecture variant provides this instruction. */
718 const arm_feature_set
* avariant
;
719 const arm_feature_set
* tvariant
;
721 /* Function to call to encode instruction in ARM format. */
722 void (* aencode
) (void);
724 /* Function to call to encode instruction in Thumb format. */
725 void (* tencode
) (void);
727 /* Indicates whether this instruction may be vector predicated. */
728 unsigned int mayBeVecPred
: 1;
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
/* NOTE(review): the extract's embedded numbering jumps here (764 -> 775);
   the defines for opcode values 0..9 are not visible in this view -- do
   not assume they are absent from the full file.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Common diagnostic messages.  */
#define BAD_SYNTAX	_("syntax error")
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_ODD		_("Odd register not allowed here")
#define BAD_EVEN	_("Even register not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT	\
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
882 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
884 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
886 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
888 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
/* SIMD/MVE-specific diagnostic messages.  */
#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
#define BAD_MVE_AUTO  \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE	_("bad element type for instruction")
/* Hash tables used by the parsers.  The names indicate the keyspace
   each table is intended for; they are populated elsewhere in this
   file.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_vcond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity between a label on its
   own line and a label attached to an instruction.  NOTE(review): the
   original comment's example lines were lost in extraction.  */
symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
920 /* Literal pool structure. Held on a per-section
921 and per-sub-section basis. */
923 #define MAX_LITERAL_POOL_SIZE 1024
924 typedef struct literal_pool
926 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
927 unsigned int next_free_entry
;
933 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
935 struct literal_pool
* next
;
936 unsigned int alignment
;
939 /* Pointer to a linked list of literal pools. */
940 literal_pool
* list_of_pools
= NULL
;
942 typedef enum asmfunc_states
945 WAITING_ASMFUNC_NAME
,
949 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
952 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
954 static struct current_pred now_pred
;
958 now_pred_compatible (int cond
)
960 return (cond
& ~1) == (now_pred
.cc
& ~1);
964 conditional_insn (void)
966 return inst
.cond
!= COND_ALWAYS
;
969 static int in_pred_block (void);
971 static int handle_pred_state (void);
973 static void force_automatic_it_block_close (void);
975 static void it_fsm_post_encode (void);
977 #define set_pred_insn_type(type) \
980 inst.pred_insn_type = type; \
981 if (handle_pred_state () == FAIL) \
986 #define set_pred_insn_type_nonvoid(type, failret) \
989 inst.pred_insn_type = type; \
990 if (handle_pred_state () == FAIL) \
995 #define set_pred_insn_type_last() \
998 if (inst.cond == COND_ALWAYS) \
999 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1001 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
/* Advance *STR past an expected character C, tolerating leading
   whitespace.  NOTE(review): the body is mostly missing from this
   extract; callers compare the result against SUCCESS/FAIL, so it
   presumably returns SUCCESS and advances *STR when C is found, FAIL
   otherwise -- confirm against the full file.  */
1041 skip_past_char (char ** str
, char c
)
1043 /* PR gas/14987: Allow for whitespace before the expected character. */
1044 skip_whitespace (*str
);
1055 #define skip_past_comma(str) skip_past_char (str, ',')
1057 /* Arithmetic expressions (possibly involving symbols). */
1059 /* Return TRUE if anything in the expression is a bignum. */
1062 walk_no_bignums (symbolS
* sp
)
/* A bignum at this node means the whole expression is out.  */
1064 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
/* Otherwise recurse into both operand symbols of the expression tree;
   X_op_symbol is only examined when present.  */
1067 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1069 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1070 || (symbol_get_value_expression (sp
)->X_op_symbol
1071 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
/* TRUE while my_get_expression is active; md_operand (below) uses it to
   mark expressions it cannot handle as O_illegal.  */
1077 static bfd_boolean in_my_get_expression
= FALSE
;
1079 /* Third argument to my_get_expression. */
1080 #define GE_NO_PREFIX 0
1081 #define GE_IMM_PREFIX 1
1082 #define GE_OPT_PREFIX 2
1083 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1084 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1085 #define GE_OPT_PREFIX_BIG 3
1088 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1092 /* In unified syntax, all prefixes are optional. */
1094 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1097 switch (prefix_mode
)
1099 case GE_NO_PREFIX
: break;
1101 if (!is_immediate_prefix (**str
))
1103 inst
.error
= _("immediate expression requires a # prefix");
1109 case GE_OPT_PREFIX_BIG
:
1110 if (is_immediate_prefix (**str
))
1117 memset (ep
, 0, sizeof (expressionS
));
1119 save_in
= input_line_pointer
;
1120 input_line_pointer
= *str
;
1121 in_my_get_expression
= TRUE
;
1123 in_my_get_expression
= FALSE
;
1125 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1127 /* We found a bad or missing expression in md_operand(). */
1128 *str
= input_line_pointer
;
1129 input_line_pointer
= save_in
;
1130 if (inst
.error
== NULL
)
1131 inst
.error
= (ep
->X_op
== O_absent
1132 ? _("missing expression") :_("bad expression"));
1136 /* Get rid of any bignums now, so that we don't generate an error for which
1137 we can't establish a line number later on. Big numbers are never valid
1138 in instructions, which is where this routine is always called. */
1139 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1140 && (ep
->X_op
== O_big
1141 || (ep
->X_add_symbol
1142 && (walk_no_bignums (ep
->X_add_symbol
)
1144 && walk_no_bignums (ep
->X_op_symbol
))))))
1146 inst
.error
= _("invalid constant");
1147 *str
= input_line_pointer
;
1148 input_line_pointer
= save_in
;
1152 *str
= input_line_pointer
;
1153 input_line_pointer
= save_in
;
1157 /* Turn a string in input_line_pointer into a floating point constant
1158 of type TYPE, and store the appropriate bytes in *LITP. The number
1159 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1160 returned, or NULL on OK.
1162 Note that fp constants aren't represent in the normal way on the ARM.
1163 In big endian mode, things are as expected. However, in little endian
1164 mode fp constants are big-endian word-wise, and little-endian byte-wise
1165 within the words. For example, (double) 1.1 in big endian mode is
1166 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1167 the byte sequence 99 99 f1 3f 9a 99 99 99.
1169 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1172 md_atof (int type
, char * litP
, int * sizeP
)
1175 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1207 return _("Unrecognized or unsupported floating point constant");
1210 t
= atof_ieee (input_line_pointer
, type
, words
);
1212 input_line_pointer
= t
;
1213 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1215 if (target_big_endian
)
1217 for (i
= 0; i
< prec
; i
++)
1219 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1220 litP
+= sizeof (LITTLENUM_TYPE
);
1225 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1226 for (i
= prec
- 1; i
>= 0; i
--)
1228 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1229 litP
+= sizeof (LITTLENUM_TYPE
);
1232 /* For a 4 byte float the order of elements in `words' is 1 0.
1233 For an 8 byte float the order is 1 0 3 2. */
1234 for (i
= 0; i
< prec
; i
+= 2)
1236 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1237 sizeof (LITTLENUM_TYPE
));
1238 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1239 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1240 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1247 /* We handle all bad expressions here, so that we can report the faulty
1248 instruction in the error message. */
/* Hook called by the generic expression parser for operands it cannot
   parse.  When reached from inside my_get_expression, flag the
   expression as illegal so the caller reports the error against the
   instruction being assembled.  */
1251 md_operand (expressionS
* exp
)
1253 if (in_my_get_expression
)
1254 exp
->X_op
= O_illegal
;
1257 /* Immediate values. */
1260 /* Generic immediate-value read function for use in directives.
1261 Accepts anything that 'expression' can fold to a constant.
1262 *val receives the number. */
1265 immediate_for_directive (int *val
)
1268 exp
.X_op
= O_illegal
;
/* An optional '#' or '$' prefix is permitted before the constant.  */
1270 if (is_immediate_prefix (*input_line_pointer
))
1272 input_line_pointer
++;
/* Anything that did not fold to a constant is diagnosed and the rest
   of the line discarded.  */
1276 if (exp
.X_op
!= O_constant
)
1278 as_bad (_("expected #constant"));
1279 ignore_rest_of_line ();
1282 *val
= exp
.X_add_number
;
1287 /* Register parsing. */
1289 /* Generic register parser. CCP points to what should be the
1290 beginning of a register name. If it is indeed a valid register
1291 name, advance CCP over it and return the reg_entry structure;
1292 otherwise return NULL. Does not issue diagnostics. */
1294 static struct reg_entry
*
1295 arm_reg_parse_multi (char **ccp
)
1299 struct reg_entry
*reg
;
1301 skip_whitespace (start
);
1303 #ifdef REGISTER_PREFIX
1304 if (*start
!= REGISTER_PREFIX
)
1308 #ifdef OPTIONAL_REGISTER_PREFIX
1309 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1314 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1319 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1321 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1331 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1332 enum arm_reg_type type
)
1334 /* Alternative syntaxes are accepted for a few register classes. */
1341 /* Generic coprocessor register names are allowed for these. */
1342 if (reg
&& reg
->type
== REG_TYPE_CN
)
1347 /* For backward compatibility, a bare number is valid here. */
1349 unsigned long processor
= strtoul (start
, ccp
, 10);
1350 if (*ccp
!= start
&& processor
<= 15)
1355 case REG_TYPE_MMXWC
:
1356 /* WC includes WCG. ??? I'm not sure this is true for all
1357 instructions that take WC registers. */
1358 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1369 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1370 return value is the register number or FAIL. */
1373 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1376 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1379 /* Do not allow a scalar (reg+index) to parse as a register. */
1380 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1383 if (reg
&& reg
->type
== type
)
1386 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1393 /* Parse a Neon type specifier. *STR should point at the leading '.'
1394 character. Does no verification at this stage that the type fits the opcode
1401 Can all be legally parsed by this function.
1403 Fills in neon_type struct pointer with parsed information, and updates STR
1404 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1405 type, FAIL if not. */
1408 parse_neon_type (struct neon_type
*type
, char **str
)
1415 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1417 enum neon_el_type thistype
= NT_untyped
;
1418 unsigned thissize
= -1u;
1425 /* Just a size without an explicit type. */
1429 switch (TOLOWER (*ptr
))
1431 case 'i': thistype
= NT_integer
; break;
1432 case 'f': thistype
= NT_float
; break;
1433 case 'p': thistype
= NT_poly
; break;
1434 case 's': thistype
= NT_signed
; break;
1435 case 'u': thistype
= NT_unsigned
; break;
1437 thistype
= NT_float
;
1442 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1448 /* .f is an abbreviation for .f32. */
1449 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1454 thissize
= strtoul (ptr
, &ptr
, 10);
1456 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1459 as_bad (_("bad size %d in type specifier"), thissize
);
1467 type
->el
[type
->elems
].type
= thistype
;
1468 type
->el
[type
->elems
].size
= thissize
;
1473 /* Empty/missing type is not a successful parse. */
1474 if (type
->elems
== 0)
1482 /* Errors may be set multiple times during parsing or bit encoding
1483 (particularly in the Neon bits), but usually the earliest error which is set
1484 will be the most meaningful. Avoid overwriting it with later (cascading)
1485 errors by calling this function. */
1488 first_error (const char *err
)
/* NOTE(review): body absent from this extract; presumably it stores ERR
   into inst.error only when no earlier error is recorded -- confirm.  */
1494 /* Parse a single type, e.g. ".s32", leading period included. */
1496 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1499 struct neon_type optype
;
1503 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1505 if (optype
.elems
== 1)
1506 *vectype
= optype
.el
[0];
1509 first_error (_("only one type should be specified for operand"));
1515 first_error (_("vector type expected"));
1527 /* Special meanings for indices (which have a range of 0-7), which will fit into
/* Sentinel index values outside the real 0-7 lane range: 15 denotes
   "all lanes" (the [] syntax), 14 denotes interleaved structure lanes
   (see the element/structure list parser later in this file).  */
1530 #define NEON_ALL_LANES 15
1531 #define NEON_INTERLEAVE_LANES 14
1533 /* Record a use of the given feature. */
/* Merge FEATURE into the set of architecture features this object has
   used.  Thumb and ARM uses are accumulated in separate sets; the
   conditional selecting between the two merges is not visible in this
   extract -- presumably it tests whether Thumb mode is active.  */
1535 record_feature_use (const arm_feature_set
*feature
)
1538 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1540 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1543 /* If the given feature available in the selected CPU, mark it as used.
1544 Returns TRUE iff feature is available. */
1546 mark_feature_used (const arm_feature_set
*feature
)
1549 /* Do not support the use of MVE only instructions when in auto-detection or
1551 if (((feature
== &mve_ext
) || (feature
== &mve_fp_ext
))
1552 && ARM_CPU_IS_ANY (cpu_variant
))
1554 first_error (BAD_MVE_AUTO
);
1557 /* Ensure the option is valid on the current architecture. */
1558 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1561 /* Add the appropriate architecture feature for the barrier option used.
1563 record_feature_use (feature
);
1568 /* Parse either a register or a scalar, with an optional type. Return the
1569 register number, and optionally fill in the actual type of the register
1570 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1571 type/index information in *TYPEINFO. */
1574 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1575 enum arm_reg_type
*rtype
,
1576 struct neon_typed_alias
*typeinfo
)
1579 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1580 struct neon_typed_alias atype
;
1581 struct neon_type_el parsetype
;
1585 atype
.eltype
.type
= NT_invtype
;
1586 atype
.eltype
.size
= -1;
1588 /* Try alternate syntax for some types of register. Note these are mutually
1589 exclusive with the Neon syntax extensions. */
1592 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1600 /* Undo polymorphism when a set of register types may be accepted. */
1601 if ((type
== REG_TYPE_NDQ
1602 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1603 || (type
== REG_TYPE_VFSD
1604 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1605 || (type
== REG_TYPE_NSDQ
1606 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1607 || reg
->type
== REG_TYPE_NQ
))
1608 || (type
== REG_TYPE_NSD
1609 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1610 || (type
== REG_TYPE_MMXWC
1611 && (reg
->type
== REG_TYPE_MMXWCG
)))
1612 type
= (enum arm_reg_type
) reg
->type
;
1614 if (type
== REG_TYPE_MQ
)
1616 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1619 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1622 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1624 first_error (_("expected MVE register [q0..q7]"));
1629 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1630 && (type
== REG_TYPE_NQ
))
1634 if (type
!= reg
->type
)
1640 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1642 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1644 first_error (_("can't redefine type for operand"));
1647 atype
.defined
|= NTA_HASTYPE
;
1648 atype
.eltype
= parsetype
;
1651 if (skip_past_char (&str
, '[') == SUCCESS
)
1653 if (type
!= REG_TYPE_VFD
1654 && !(type
== REG_TYPE_VFS
1655 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1657 first_error (_("only D registers may be indexed"));
1661 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1663 first_error (_("can't change index for operand"));
1667 atype
.defined
|= NTA_HASINDEX
;
1669 if (skip_past_char (&str
, ']') == SUCCESS
)
1670 atype
.index
= NEON_ALL_LANES
;
1675 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1677 if (exp
.X_op
!= O_constant
)
1679 first_error (_("constant expression required"));
1683 if (skip_past_char (&str
, ']') == FAIL
)
1686 atype
.index
= exp
.X_add_number
;
1701 /* Like arm_reg_parse, but also allow the following extra features:
1702 - If RTYPE is non-zero, return the (possibly restricted) type of the
1703 register (e.g. Neon double or quad reg when either has been requested).
1704 - If this is a Neon vector type with additional type information, fill
1705 in the struct pointed to by VECTYPE (if non-NULL).
1706 This function will fault on encountering a scalar. */
1709 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1710 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1712 struct neon_typed_alias atype
;
1714 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1719 /* Do not allow regname(... to parse as a register. */
1723 /* Do not allow a scalar (reg+index) to parse as a register. */
1724 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1726 first_error (_("register operand expected, but got scalar"));
1731 *vectype
= atype
.eltype
;
/* parse_scalar (below) packs a scalar as reg * 16 + index; these macros
   decompose that encoding.  */
1738 #define NEON_SCALAR_REG(X) ((X) >> 4)
1739 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1741 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1742 have enough information to be able to do a good job bounds-checking. So, we
1743 just do easy checks here, and do further checks later. */
1746 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1750 struct neon_typed_alias atype
;
1751 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1754 reg_type
= REG_TYPE_VFS
;
1756 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1758 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1761 if (atype
.index
== NEON_ALL_LANES
)
1763 first_error (_("scalar must have an index"));
1766 else if (atype
.index
>= 64 / elsize
)
1768 first_error (_("scalar index out of range"));
1773 *type
= atype
.eltype
;
1777 return reg
* 16 + atype
.index
;
1780 /* Types of registers in a list. */
1793 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1796 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1802 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1804 /* We come back here if we get ranges concatenated by '+' or '|'. */
1807 skip_whitespace (str
);
1820 const char apsr_str
[] = "apsr";
1821 int apsr_str_len
= strlen (apsr_str
);
1823 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1824 if (etype
== REGLIST_CLRM
)
1826 if (reg
== REG_SP
|| reg
== REG_PC
)
1828 else if (reg
== FAIL
1829 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1830 && !ISALPHA (*(str
+ apsr_str_len
)))
1833 str
+= apsr_str_len
;
1838 first_error (_("r0-r12, lr or APSR expected"));
1842 else /* etype == REGLIST_RN. */
1846 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1857 first_error (_("bad range in register list"));
1861 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1863 if (range
& (1 << i
))
1865 (_("Warning: duplicated register (r%d) in register list"),
1873 if (range
& (1 << reg
))
1874 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1876 else if (reg
<= cur_reg
)
1877 as_tsktsk (_("Warning: register range not in ascending order"));
1882 while (skip_past_comma (&str
) != FAIL
1883 || (in_range
= 1, *str
++ == '-'));
1886 if (skip_past_char (&str
, '}') == FAIL
)
1888 first_error (_("missing `}'"));
1892 else if (etype
== REGLIST_RN
)
1896 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1899 if (exp
.X_op
== O_constant
)
1901 if (exp
.X_add_number
1902 != (exp
.X_add_number
& 0x0000ffff))
1904 inst
.error
= _("invalid register mask");
1908 if ((range
& exp
.X_add_number
) != 0)
1910 int regno
= range
& exp
.X_add_number
;
1913 regno
= (1 << regno
) - 1;
1915 (_("Warning: duplicated register (r%d) in register list"),
1919 range
|= exp
.X_add_number
;
1923 if (inst
.relocs
[0].type
!= 0)
1925 inst
.error
= _("expression too complex");
1929 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1930 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1931 inst
.relocs
[0].pc_rel
= 0;
1935 if (*str
== '|' || *str
== '+')
1941 while (another_range
);
1947 /* Parse a VFP register list. If the string is invalid return FAIL.
1948 Otherwise return the number of registers, and set PBASE to the first
1949 register. Parses registers of type ETYPE.
1950 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1951 - Q registers can be used to specify pairs of D registers
1952 - { } can be omitted from around a singleton register list
1953 FIXME: This is not implemented, as it would require backtracking in
1956 This could be done (the meaning isn't really ambiguous), but doesn't
1957 fit in well with the current parsing framework.
1958 - 32 D registers may be used (also true for VFPv3).
1959 FIXME: Types are ignored in these register lists, which is probably a
1963 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1964 bfd_boolean
*partial_match
)
1969 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1973 unsigned long mask
= 0;
1975 bfd_boolean vpr_seen
= FALSE
;
1976 bfd_boolean expect_vpr
=
1977 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1979 if (skip_past_char (&str
, '{') == FAIL
)
1981 inst
.error
= _("expecting {");
1988 case REGLIST_VFP_S_VPR
:
1989 regtype
= REG_TYPE_VFS
;
1994 case REGLIST_VFP_D_VPR
:
1995 regtype
= REG_TYPE_VFD
;
1998 case REGLIST_NEON_D
:
1999 regtype
= REG_TYPE_NDQ
;
2006 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
2008 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2009 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
2013 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2016 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2023 base_reg
= max_regs
;
2024 *partial_match
= FALSE
;
2028 int setmask
= 1, addregs
= 1;
2029 const char vpr_str
[] = "vpr";
2030 int vpr_str_len
= strlen (vpr_str
);
2032 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2036 if (new_base
== FAIL
2037 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2038 && !ISALPHA (*(str
+ vpr_str_len
))
2044 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2048 first_error (_("VPR expected last"));
2051 else if (new_base
== FAIL
)
2053 if (regtype
== REG_TYPE_VFS
)
2054 first_error (_("VFP single precision register or VPR "
2056 else /* regtype == REG_TYPE_VFD. */
2057 first_error (_("VFP/Neon double precision register or VPR "
2062 else if (new_base
== FAIL
)
2064 first_error (_(reg_expected_msgs
[regtype
]));
2068 *partial_match
= TRUE
;
2072 if (new_base
>= max_regs
)
2074 first_error (_("register out of range in list"));
2078 /* Note: a value of 2 * n is returned for the register Q<n>. */
2079 if (regtype
== REG_TYPE_NQ
)
2085 if (new_base
< base_reg
)
2086 base_reg
= new_base
;
2088 if (mask
& (setmask
<< new_base
))
2090 first_error (_("invalid register list"));
2094 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2096 as_tsktsk (_("register list not in ascending order"));
2100 mask
|= setmask
<< new_base
;
2103 if (*str
== '-') /* We have the start of a range expression */
2109 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2112 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2116 if (high_range
>= max_regs
)
2118 first_error (_("register out of range in list"));
2122 if (regtype
== REG_TYPE_NQ
)
2123 high_range
= high_range
+ 1;
2125 if (high_range
<= new_base
)
2127 inst
.error
= _("register range not in ascending order");
2131 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2133 if (mask
& (setmask
<< new_base
))
2135 inst
.error
= _("invalid register list");
2139 mask
|= setmask
<< new_base
;
2144 while (skip_past_comma (&str
) != FAIL
);
2148 /* Sanity check -- should have raised a parse error above. */
2149 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2154 if (expect_vpr
&& !vpr_seen
)
2156 first_error (_("VPR expected last"));
2160 /* Final test -- the registers must be consecutive. */
2162 for (i
= 0; i
< count
; i
++)
2164 if ((mask
& (1u << i
)) == 0)
2166 inst
.error
= _("non-contiguous register range");
2176 /* True if two alias types are the same. */
2179 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2187 if (a
->defined
!= b
->defined
)
2190 if ((a
->defined
& NTA_HASTYPE
) != 0
2191 && (a
->eltype
.type
!= b
->eltype
.type
2192 || a
->eltype
.size
!= b
->eltype
.size
))
2195 if ((a
->defined
& NTA_HASINDEX
) != 0
2196 && (a
->index
!= b
->index
))
2202 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2203 The base register is put in *PBASE.
2204 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2206 The register stride (minus one) is put in bit 4 of the return value.
2207 Bits [6:5] encode the list length (minus one).
2208 The type of the list elements is put in *ELTYPE, if non-NULL. */
/* Decoders for the packed return value described above.  */
2210 #define NEON_LANE(X) ((X) & 0xf)
2211 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2212 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2215 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2217 struct neon_type_el
*eltype
)
2224 int leading_brace
= 0;
2225 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2226 const char *const incr_error
= mve
? _("register stride must be 1") :
2227 _("register stride must be 1 or 2");
2228 const char *const type_error
= _("mismatched element/structure types in list");
2229 struct neon_typed_alias firsttype
;
2230 firsttype
.defined
= 0;
2231 firsttype
.eltype
.type
= NT_invtype
;
2232 firsttype
.eltype
.size
= -1;
2233 firsttype
.index
= -1;
2235 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2240 struct neon_typed_alias atype
;
2242 rtype
= REG_TYPE_MQ
;
2243 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2247 first_error (_(reg_expected_msgs
[rtype
]));
2254 if (rtype
== REG_TYPE_NQ
)
2260 else if (reg_incr
== -1)
2262 reg_incr
= getreg
- base_reg
;
2263 if (reg_incr
< 1 || reg_incr
> 2)
2265 first_error (_(incr_error
));
2269 else if (getreg
!= base_reg
+ reg_incr
* count
)
2271 first_error (_(incr_error
));
2275 if (! neon_alias_types_same (&atype
, &firsttype
))
2277 first_error (_(type_error
));
2281 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2285 struct neon_typed_alias htype
;
2286 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2288 lane
= NEON_INTERLEAVE_LANES
;
2289 else if (lane
!= NEON_INTERLEAVE_LANES
)
2291 first_error (_(type_error
));
2296 else if (reg_incr
!= 1)
2298 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2302 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2305 first_error (_(reg_expected_msgs
[rtype
]));
2308 if (! neon_alias_types_same (&htype
, &firsttype
))
2310 first_error (_(type_error
));
2313 count
+= hireg
+ dregs
- getreg
;
2317 /* If we're using Q registers, we can't use [] or [n] syntax. */
2318 if (rtype
== REG_TYPE_NQ
)
2324 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2328 else if (lane
!= atype
.index
)
2330 first_error (_(type_error
));
2334 else if (lane
== -1)
2335 lane
= NEON_INTERLEAVE_LANES
;
2336 else if (lane
!= NEON_INTERLEAVE_LANES
)
2338 first_error (_(type_error
));
2343 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2345 /* No lane set by [x]. We must be interleaving structures. */
2347 lane
= NEON_INTERLEAVE_LANES
;
2350 if (lane
== -1 || base_reg
== -1 || count
< 1 || (!mve
&& count
> 4)
2351 || (count
> 1 && reg_incr
== -1))
2353 first_error (_("error parsing element/structure list"));
2357 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2359 first_error (_("expected }"));
2367 *eltype
= firsttype
.eltype
;
2372 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2375 /* Parse an explicit relocation suffix on an expression. This is
2376 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2377 arm_reloc_hsh contains no entries, so this function can only
2378 succeed if there is no () after the word. Returns -1 on error,
2379 BFD_RELOC_UNUSED if there wasn't any suffix. */
2382 parse_reloc (char **str
)
2384 struct reloc_entry
*r
;
2388 return BFD_RELOC_UNUSED
;
2393 while (*q
&& *q
!= ')' && *q
!= ',')
2398 if ((r
= (struct reloc_entry
*)
2399 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2406 /* Directives: register aliases. */
2408 static struct reg_entry
*
2409 insert_reg_alias (char *str
, unsigned number
, int type
)
2411 struct reg_entry
*new_reg
;
2414 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2416 if (new_reg
->builtin
)
2417 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2419 /* Only warn about a redefinition if it's not defined as the
2421 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2422 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2427 name
= xstrdup (str
);
2428 new_reg
= XNEW (struct reg_entry
);
2430 new_reg
->name
= name
;
2431 new_reg
->number
= number
;
2432 new_reg
->type
= type
;
2433 new_reg
->builtin
= FALSE
;
2434 new_reg
->neon
= NULL
;
2436 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2443 insert_neon_reg_alias (char *str
, int number
, int type
,
2444 struct neon_typed_alias
*atype
)
2446 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2450 first_error (_("attempt to redefine typed alias"));
2456 reg
->neon
= XNEW (struct neon_typed_alias
);
2457 *reg
->neon
= *atype
;
2461 /* Look for the .req directive. This is of the form:
2463 new_register_name .req existing_register_name
2465 If we find one, or if it looks sufficiently like one that we want to
2466 handle any error here, return TRUE. Otherwise return FALSE. */
2469 create_register_alias (char * newname
, char *p
)
2471 struct reg_entry
*old
;
2472 char *oldname
, *nbuf
;
2475 /* The input scrubber ensures that whitespace after the mnemonic is
2476 collapsed to single spaces. */
2478 if (strncmp (oldname
, " .req ", 6) != 0)
2482 if (*oldname
== '\0')
2485 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2488 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2492 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2493 the desired alias name, and p points to its end. If not, then
2494 the desired alias name is in the global original_case_string. */
2495 #ifdef TC_CASE_SENSITIVE
2498 newname
= original_case_string
;
2499 nlen
= strlen (newname
);
2502 nbuf
= xmemdup0 (newname
, nlen
);
2504 /* Create aliases under the new name as stated; an all-lowercase
2505 version of the new name; and an all-uppercase version of the new
2507 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2509 for (p
= nbuf
; *p
; p
++)
2512 if (strncmp (nbuf
, newname
, nlen
))
2514 /* If this attempt to create an additional alias fails, do not bother
2515 trying to create the all-lower case alias. We will fail and issue
2516 a second, duplicate error message. This situation arises when the
2517 programmer does something like:
2520 The second .req creates the "Foo" alias but then fails to create
2521 the artificial FOO alias because it has already been created by the
2523 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2530 for (p
= nbuf
; *p
; p
++)
2533 if (strncmp (nbuf
, newname
, nlen
))
2534 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2541 /* Create a Neon typed/indexed register alias using directives, e.g.:
2546 These typed registers can be used instead of the types specified after the
2547 Neon mnemonic, so long as all operands given have types. Types can also be
2548 specified directly, e.g.:
2549 vadd d0.s32, d1.s32, d2.s32 */
2552 create_neon_reg_alias (char *newname
, char *p
)
2554 enum arm_reg_type basetype
;
2555 struct reg_entry
*basereg
;
2556 struct reg_entry mybasereg
;
2557 struct neon_type ntype
;
2558 struct neon_typed_alias typeinfo
;
2559 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2562 typeinfo
.defined
= 0;
2563 typeinfo
.eltype
.type
= NT_invtype
;
2564 typeinfo
.eltype
.size
= -1;
2565 typeinfo
.index
= -1;
2569 if (strncmp (p
, " .dn ", 5) == 0)
2570 basetype
= REG_TYPE_VFD
;
2571 else if (strncmp (p
, " .qn ", 5) == 0)
2572 basetype
= REG_TYPE_NQ
;
2581 basereg
= arm_reg_parse_multi (&p
);
2583 if (basereg
&& basereg
->type
!= basetype
)
2585 as_bad (_("bad type for register"));
2589 if (basereg
== NULL
)
2592 /* Try parsing as an integer. */
2593 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2594 if (exp
.X_op
!= O_constant
)
2596 as_bad (_("expression must be constant"));
2599 basereg
= &mybasereg
;
2600 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2606 typeinfo
= *basereg
->neon
;
2608 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2610 /* We got a type. */
2611 if (typeinfo
.defined
& NTA_HASTYPE
)
2613 as_bad (_("can't redefine the type of a register alias"));
2617 typeinfo
.defined
|= NTA_HASTYPE
;
2618 if (ntype
.elems
!= 1)
2620 as_bad (_("you must specify a single type only"));
2623 typeinfo
.eltype
= ntype
.el
[0];
2626 if (skip_past_char (&p
, '[') == SUCCESS
)
2629 /* We got a scalar index. */
2631 if (typeinfo
.defined
& NTA_HASINDEX
)
2633 as_bad (_("can't redefine the index of a scalar alias"));
2637 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2639 if (exp
.X_op
!= O_constant
)
2641 as_bad (_("scalar index must be constant"));
2645 typeinfo
.defined
|= NTA_HASINDEX
;
2646 typeinfo
.index
= exp
.X_add_number
;
2648 if (skip_past_char (&p
, ']') == FAIL
)
2650 as_bad (_("expecting ]"));
2655 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2656 the desired alias name, and p points to its end. If not, then
2657 the desired alias name is in the global original_case_string. */
2658 #ifdef TC_CASE_SENSITIVE
2659 namelen
= nameend
- newname
;
2661 newname
= original_case_string
;
2662 namelen
= strlen (newname
);
2665 namebuf
= xmemdup0 (newname
, namelen
);
2667 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2668 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2670 /* Insert name in all uppercase. */
2671 for (p
= namebuf
; *p
; p
++)
2674 if (strncmp (namebuf
, newname
, namelen
))
2675 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2676 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2678 /* Insert name in all lowercase. */
2679 for (p
= namebuf
; *p
; p
++)
2682 if (strncmp (namebuf
, newname
, namelen
))
2683 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2684 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2690 /* Should never be called, as .req goes between the alias and the
2691 register name, not at the beginning of the line. */
2694 s_req (int a ATTRIBUTE_UNUSED
)
2696 as_bad (_("invalid syntax for .req directive"));
2700 s_dn (int a ATTRIBUTE_UNUSED
)
2702 as_bad (_("invalid syntax for .dn directive"));
2706 s_qn (int a ATTRIBUTE_UNUSED
)
2708 as_bad (_("invalid syntax for .qn directive"));
2711 /* The .unreq directive deletes an alias which was previously defined
2712 by .req. For example:
2718 s_unreq (int a ATTRIBUTE_UNUSED
)
2723 name
= input_line_pointer
;
2725 while (*input_line_pointer
!= 0
2726 && *input_line_pointer
!= ' '
2727 && *input_line_pointer
!= '\n')
2728 ++input_line_pointer
;
2730 saved_char
= *input_line_pointer
;
2731 *input_line_pointer
= 0;
2734 as_bad (_("invalid syntax for .unreq directive"));
2737 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2741 as_bad (_("unknown register alias '%s'"), name
);
2742 else if (reg
->builtin
)
2743 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2750 hash_delete (arm_reg_hsh
, name
, FALSE
);
2751 free ((char *) reg
->name
);
2756 /* Also locate the all upper case and all lower case versions.
2757 Do not complain if we cannot find one or the other as it
2758 was probably deleted above. */
2760 nbuf
= strdup (name
);
2761 for (p
= nbuf
; *p
; p
++)
2763 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2766 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2767 free ((char *) reg
->name
);
2773 for (p
= nbuf
; *p
; p
++)
2775 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2778 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2779 free ((char *) reg
->name
);
2789 *input_line_pointer
= saved_char
;
2790 demand_empty_rest_of_line ();
2793 /* Directives: Instruction set selection. */
2796 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2797 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2798 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2799 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2801 /* Create a new mapping symbol for the transition to STATE. */
2804 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2807 const char * symname
;
2814 type
= BSF_NO_FLAGS
;
2818 type
= BSF_NO_FLAGS
;
2822 type
= BSF_NO_FLAGS
;
2828 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2829 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2834 THUMB_SET_FUNC (symbolP
, 0);
2835 ARM_SET_THUMB (symbolP
, 0);
2836 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2840 THUMB_SET_FUNC (symbolP
, 1);
2841 ARM_SET_THUMB (symbolP
, 1);
2842 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2850 /* Save the mapping symbols for future reference. Also check that
2851 we do not place two mapping symbols at the same offset within a
2852 frag. We'll handle overlap between frags in
2853 check_mapping_symbols.
2855 If .fill or other data filling directive generates zero sized data,
2856 the mapping symbol for the following code will have the same value
2857 as the one generated for the data filling directive. In this case,
2858 we replace the old symbol with the new one at the same address. */
2861 if (frag
->tc_frag_data
.first_map
!= NULL
)
2863 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2864 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2866 frag
->tc_frag_data
.first_map
= symbolP
;
2868 if (frag
->tc_frag_data
.last_map
!= NULL
)
2870 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2871 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2872 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2874 frag
->tc_frag_data
.last_map
= symbolP
;
2877 /* We must sometimes convert a region marked as code to data during
2878 code alignment, if an odd number of bytes have to be padded. The
2879 code mapping symbol is pushed to an aligned address. */
2882 insert_data_mapping_symbol (enum mstate state
,
2883 valueT value
, fragS
*frag
, offsetT bytes
)
2885 /* If there was already a mapping symbol, remove it. */
2886 if (frag
->tc_frag_data
.last_map
!= NULL
2887 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2889 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2893 know (frag
->tc_frag_data
.first_map
== symp
);
2894 frag
->tc_frag_data
.first_map
= NULL
;
2896 frag
->tc_frag_data
.last_map
= NULL
;
2897 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2900 make_mapping_symbol (MAP_DATA
, value
, frag
);
2901 make_mapping_symbol (state
, value
+ bytes
, frag
);
2904 static void mapping_state_2 (enum mstate state
, int max_chars
);
2906 /* Set the mapping state to STATE. Only call this when about to
2907 emit some STATE bytes to the file. */
2909 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2911 mapping_state (enum mstate state
)
2913 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2915 if (mapstate
== state
)
2916 /* The mapping symbol has already been emitted.
2917 There is nothing else to do. */
2920 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2922 All ARM instructions require 4-byte alignment.
2923 (Almost) all Thumb instructions require 2-byte alignment.
2925 When emitting instructions into any section, mark the section
2928 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2929 but themselves require 2-byte alignment; this applies to some
2930 PC- relative forms. However, these cases will involve implicit
2931 literal pool generation or an explicit .align >=2, both of
2932 which will cause the section to me marked with sufficient
2933 alignment. Thus, we don't handle those cases here. */
2934 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2936 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2937 /* This case will be evaluated later. */
2940 mapping_state_2 (state
, 0);
2943 /* Same as mapping_state, but MAX_CHARS bytes have already been
2944 allocated. Put the mapping symbol that far back. */
2947 mapping_state_2 (enum mstate state
, int max_chars
)
2949 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2951 if (!SEG_NORMAL (now_seg
))
2954 if (mapstate
== state
)
2955 /* The mapping symbol has already been emitted.
2956 There is nothing else to do. */
2959 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2960 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2962 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2963 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2966 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2969 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2970 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2974 #define mapping_state(x) ((void)0)
2975 #define mapping_state_2(x, y) ((void)0)
2978 /* Find the real, Thumb encoded start of a Thumb function. */
2982 find_real_start (symbolS
* symbolP
)
2985 const char * name
= S_GET_NAME (symbolP
);
2986 symbolS
* new_target
;
2988 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2989 #define STUB_NAME ".real_start_of"
2994 /* The compiler may generate BL instructions to local labels because
2995 it needs to perform a branch to a far away location. These labels
2996 do not have a corresponding ".real_start_of" label. We check
2997 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2998 the ".real_start_of" convention for nonlocal branches. */
2999 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
3002 real_start
= concat (STUB_NAME
, name
, NULL
);
3003 new_target
= symbol_find (real_start
);
3006 if (new_target
== NULL
)
3008 as_warn (_("Failed to find real start of function: %s\n"), name
);
3009 new_target
= symbolP
;
3017 opcode_select (int width
)
3024 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3025 as_bad (_("selected processor does not support THUMB opcodes"));
3028 /* No need to force the alignment, since we will have been
3029 coming from ARM mode, which is word-aligned. */
3030 record_alignment (now_seg
, 1);
3037 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3038 as_bad (_("selected processor does not support ARM opcodes"));
3043 frag_align (2, 0, 0);
3045 record_alignment (now_seg
, 1);
3050 as_bad (_("invalid instruction size selected (%d)"), width
);
3055 s_arm (int ignore ATTRIBUTE_UNUSED
)
3058 demand_empty_rest_of_line ();
3062 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3065 demand_empty_rest_of_line ();
3069 s_code (int unused ATTRIBUTE_UNUSED
)
3073 temp
= get_absolute_expression ();
3078 opcode_select (temp
);
3082 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3087 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3089 /* If we are not already in thumb mode go into it, EVEN if
3090 the target processor does not support thumb instructions.
3091 This is used by gcc/config/arm/lib1funcs.asm for example
3092 to compile interworking support functions even if the
3093 target processor should not support interworking. */
3097 record_alignment (now_seg
, 1);
3100 demand_empty_rest_of_line ();
3104 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3108 /* The following label is the name/address of the start of a Thumb function.
3109 We need to know this for the interworking support. */
3110 label_is_thumb_function_name
= TRUE
;
3113 /* Perform a .set directive, but also mark the alias as
3114 being a thumb function. */
3117 s_thumb_set (int equiv
)
3119 /* XXX the following is a duplicate of the code for s_set() in read.c
3120 We cannot just call that code as we need to get at the symbol that
3127 /* Especial apologies for the random logic:
3128 This just grew, and could be parsed much more simply!
3130 delim
= get_symbol_name (& name
);
3131 end_name
= input_line_pointer
;
3132 (void) restore_line_pointer (delim
);
3134 if (*input_line_pointer
!= ',')
3137 as_bad (_("expected comma after name \"%s\""), name
);
3139 ignore_rest_of_line ();
3143 input_line_pointer
++;
3146 if (name
[0] == '.' && name
[1] == '\0')
3148 /* XXX - this should not happen to .thumb_set. */
3152 if ((symbolP
= symbol_find (name
)) == NULL
3153 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3156 /* When doing symbol listings, play games with dummy fragments living
3157 outside the normal fragment chain to record the file and line info
3159 if (listing
& LISTING_SYMBOLS
)
3161 extern struct list_info_struct
* listing_tail
;
3162 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3164 memset (dummy_frag
, 0, sizeof (fragS
));
3165 dummy_frag
->fr_type
= rs_fill
;
3166 dummy_frag
->line
= listing_tail
;
3167 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3168 dummy_frag
->fr_symbol
= symbolP
;
3172 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3175 /* "set" symbols are local unless otherwise specified. */
3176 SF_SET_LOCAL (symbolP
);
3177 #endif /* OBJ_COFF */
3178 } /* Make a new symbol. */
3180 symbol_table_insert (symbolP
);
3185 && S_IS_DEFINED (symbolP
)
3186 && S_GET_SEGMENT (symbolP
) != reg_section
)
3187 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3189 pseudo_set (symbolP
);
3191 demand_empty_rest_of_line ();
3193 /* XXX Now we come to the Thumb specific bit of code. */
3195 THUMB_SET_FUNC (symbolP
, 1);
3196 ARM_SET_THUMB (symbolP
, 1);
3197 #if defined OBJ_ELF || defined OBJ_COFF
3198 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3202 /* Directives: Mode selection. */
3204 /* .syntax [unified|divided] - choose the new unified syntax
3205 (same for Arm and Thumb encoding, modulo slight differences in what
3206 can be represented) or the old divergent syntax for each mode. */
3208 s_syntax (int unused ATTRIBUTE_UNUSED
)
3212 delim
= get_symbol_name (& name
);
3214 if (!strcasecmp (name
, "unified"))
3215 unified_syntax
= TRUE
;
3216 else if (!strcasecmp (name
, "divided"))
3217 unified_syntax
= FALSE
;
3220 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3223 (void) restore_line_pointer (delim
);
3224 demand_empty_rest_of_line ();
3227 /* Directives: sectioning and alignment. */
3230 s_bss (int ignore ATTRIBUTE_UNUSED
)
3232 /* We don't support putting frags in the BSS segment, we fake it by
3233 marking in_bss, then looking at s_skip for clues. */
3234 subseg_set (bss_section
, 0);
3235 demand_empty_rest_of_line ();
3237 #ifdef md_elf_section_change_hook
3238 md_elf_section_change_hook ();
3243 s_even (int ignore ATTRIBUTE_UNUSED
)
3245 /* Never make frag if expect extra pass. */
3247 frag_align (1, 0, 0);
3249 record_alignment (now_seg
, 1);
3251 demand_empty_rest_of_line ();
3254 /* Directives: CodeComposer Studio. */
3256 /* .ref (for CodeComposer Studio syntax only). */
3258 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3260 if (codecomposer_syntax
)
3261 ignore_rest_of_line ();
3263 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3266 /* If name is not NULL, then it is used for marking the beginning of a
3267 function, whereas if it is NULL then it means the function end. */
3269 asmfunc_debug (const char * name
)
3271 static const char * last_name
= NULL
;
3275 gas_assert (last_name
== NULL
);
3278 if (debug_type
== DEBUG_STABS
)
3279 stabs_generate_asm_func (name
, name
);
3283 gas_assert (last_name
!= NULL
);
3285 if (debug_type
== DEBUG_STABS
)
3286 stabs_generate_asm_endfunc (last_name
, last_name
);
3293 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3295 if (codecomposer_syntax
)
3297 switch (asmfunc_state
)
3299 case OUTSIDE_ASMFUNC
:
3300 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3303 case WAITING_ASMFUNC_NAME
:
3304 as_bad (_(".asmfunc repeated."));
3307 case WAITING_ENDASMFUNC
:
3308 as_bad (_(".asmfunc without function."));
3311 demand_empty_rest_of_line ();
3314 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3318 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3320 if (codecomposer_syntax
)
3322 switch (asmfunc_state
)
3324 case OUTSIDE_ASMFUNC
:
3325 as_bad (_(".endasmfunc without a .asmfunc."));
3328 case WAITING_ASMFUNC_NAME
:
3329 as_bad (_(".endasmfunc without function."));
3332 case WAITING_ENDASMFUNC
:
3333 asmfunc_state
= OUTSIDE_ASMFUNC
;
3334 asmfunc_debug (NULL
);
3337 demand_empty_rest_of_line ();
3340 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3344 s_ccs_def (int name
)
3346 if (codecomposer_syntax
)
3349 as_bad (_(".def pseudo-op only available with -mccs flag."));
3352 /* Directives: Literal pools. */
3354 static literal_pool
*
3355 find_literal_pool (void)
3357 literal_pool
* pool
;
3359 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3361 if (pool
->section
== now_seg
3362 && pool
->sub_section
== now_subseg
)
3369 static literal_pool
*
3370 find_or_make_literal_pool (void)
3372 /* Next literal pool ID number. */
3373 static unsigned int latest_pool_num
= 1;
3374 literal_pool
* pool
;
3376 pool
= find_literal_pool ();
3380 /* Create a new pool. */
3381 pool
= XNEW (literal_pool
);
3385 pool
->next_free_entry
= 0;
3386 pool
->section
= now_seg
;
3387 pool
->sub_section
= now_subseg
;
3388 pool
->next
= list_of_pools
;
3389 pool
->symbol
= NULL
;
3390 pool
->alignment
= 2;
3392 /* Add it to the list. */
3393 list_of_pools
= pool
;
3396 /* New pools, and emptied pools, will have a NULL symbol. */
3397 if (pool
->symbol
== NULL
)
3399 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3400 (valueT
) 0, &zero_address_frag
);
3401 pool
->id
= latest_pool_num
++;
3408 /* Add the literal in the global 'inst'
3409 structure to the relevant literal pool. */
3412 add_to_lit_pool (unsigned int nbytes
)
3414 #define PADDING_SLOT 0x1
3415 #define LIT_ENTRY_SIZE_MASK 0xFF
3416 literal_pool
* pool
;
3417 unsigned int entry
, pool_size
= 0;
3418 bfd_boolean padding_slot_p
= FALSE
;
3424 imm1
= inst
.operands
[1].imm
;
3425 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3426 : inst
.relocs
[0].exp
.X_unsigned
? 0
3427 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3428 if (target_big_endian
)
3431 imm2
= inst
.operands
[1].imm
;
3435 pool
= find_or_make_literal_pool ();
3437 /* Check if this literal value is already in the pool. */
3438 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3442 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3443 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3444 && (pool
->literals
[entry
].X_add_number
3445 == inst
.relocs
[0].exp
.X_add_number
)
3446 && (pool
->literals
[entry
].X_md
== nbytes
)
3447 && (pool
->literals
[entry
].X_unsigned
3448 == inst
.relocs
[0].exp
.X_unsigned
))
3451 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3452 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3453 && (pool
->literals
[entry
].X_add_number
3454 == inst
.relocs
[0].exp
.X_add_number
)
3455 && (pool
->literals
[entry
].X_add_symbol
3456 == inst
.relocs
[0].exp
.X_add_symbol
)
3457 && (pool
->literals
[entry
].X_op_symbol
3458 == inst
.relocs
[0].exp
.X_op_symbol
)
3459 && (pool
->literals
[entry
].X_md
== nbytes
))
3462 else if ((nbytes
== 8)
3463 && !(pool_size
& 0x7)
3464 && ((entry
+ 1) != pool
->next_free_entry
)
3465 && (pool
->literals
[entry
].X_op
== O_constant
)
3466 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3467 && (pool
->literals
[entry
].X_unsigned
3468 == inst
.relocs
[0].exp
.X_unsigned
)
3469 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3470 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3471 && (pool
->literals
[entry
+ 1].X_unsigned
3472 == inst
.relocs
[0].exp
.X_unsigned
))
3475 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3476 if (padding_slot_p
&& (nbytes
== 4))
3482 /* Do we need to create a new entry? */
3483 if (entry
== pool
->next_free_entry
)
3485 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3487 inst
.error
= _("literal pool overflow");
3493 /* For 8-byte entries, we align to an 8-byte boundary,
3494 and split it into two 4-byte entries, because on 32-bit
3495 host, 8-byte constants are treated as big num, thus
3496 saved in "generic_bignum" which will be overwritten
3497 by later assignments.
3499 We also need to make sure there is enough space for
3502 We also check to make sure the literal operand is a
3504 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3505 || inst
.relocs
[0].exp
.X_op
== O_big
))
3507 inst
.error
= _("invalid type for literal pool");
3510 else if (pool_size
& 0x7)
3512 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3514 inst
.error
= _("literal pool overflow");
3518 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3519 pool
->literals
[entry
].X_op
= O_constant
;
3520 pool
->literals
[entry
].X_add_number
= 0;
3521 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3522 pool
->next_free_entry
+= 1;
3525 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3527 inst
.error
= _("literal pool overflow");
3531 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3532 pool
->literals
[entry
].X_op
= O_constant
;
3533 pool
->literals
[entry
].X_add_number
= imm1
;
3534 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3535 pool
->literals
[entry
++].X_md
= 4;
3536 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3537 pool
->literals
[entry
].X_op
= O_constant
;
3538 pool
->literals
[entry
].X_add_number
= imm2
;
3539 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3540 pool
->literals
[entry
].X_md
= 4;
3541 pool
->alignment
= 3;
3542 pool
->next_free_entry
+= 1;
3546 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3547 pool
->literals
[entry
].X_md
= 4;
3551 /* PR ld/12974: Record the location of the first source line to reference
3552 this entry in the literal pool. If it turns out during linking that the
3553 symbol does not exist we will be able to give an accurate line number for
3554 the (first use of the) missing reference. */
3555 if (debug_type
== DEBUG_DWARF2
)
3556 dwarf2_where (pool
->locs
+ entry
);
3558 pool
->next_free_entry
+= 1;
3560 else if (padding_slot_p
)
3562 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3563 pool
->literals
[entry
].X_md
= nbytes
;
3566 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3567 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3568 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3574 tc_start_label_without_colon (void)
3576 bfd_boolean ret
= TRUE
;
3578 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3580 const char *label
= input_line_pointer
;
3582 while (!is_end_of_line
[(int) label
[-1]])
3587 as_bad (_("Invalid label '%s'"), label
);
3591 asmfunc_debug (label
);
3593 asmfunc_state
= WAITING_ENDASMFUNC
;
3599 /* Can't use symbol_new here, so have to create a symbol and then at
3600 a later date assign it a value. That's what these functions do. */
3603 symbol_locate (symbolS
* symbolP
,
3604 const char * name
, /* It is copied, the caller can modify. */
3605 segT segment
, /* Segment identifier (SEG_<something>). */
3606 valueT valu
, /* Symbol value. */
3607 fragS
* frag
) /* Associated fragment. */
3610 char * preserved_copy_of_name
;
3612 name_length
= strlen (name
) + 1; /* +1 for \0. */
3613 obstack_grow (¬es
, name
, name_length
);
3614 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3616 #ifdef tc_canonicalize_symbol_name
3617 preserved_copy_of_name
=
3618 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3621 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3623 S_SET_SEGMENT (symbolP
, segment
);
3624 S_SET_VALUE (symbolP
, valu
);
3625 symbol_clear_list_pointers (symbolP
);
3627 symbol_set_frag (symbolP
, frag
);
3629 /* Link to end of symbol chain. */
3631 extern int symbol_table_frozen
;
3633 if (symbol_table_frozen
)
3637 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3639 obj_symbol_new_hook (symbolP
);
3641 #ifdef tc_symbol_new_hook
3642 tc_symbol_new_hook (symbolP
);
3646 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3647 #endif /* DEBUG_SYMS */
3651 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3654 literal_pool
* pool
;
3657 pool
= find_literal_pool ();
3659 || pool
->symbol
== NULL
3660 || pool
->next_free_entry
== 0)
3663 /* Align pool as you have word accesses.
3664 Only make a frag if we have to. */
3666 frag_align (pool
->alignment
, 0, 0);
3668 record_alignment (now_seg
, 2);
3671 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3672 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3674 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3676 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3677 (valueT
) frag_now_fix (), frag_now
);
3678 symbol_table_insert (pool
->symbol
);
3680 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3682 #if defined OBJ_COFF || defined OBJ_ELF
3683 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3686 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3689 if (debug_type
== DEBUG_DWARF2
)
3690 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3692 /* First output the expression in the instruction to the pool. */
3693 emit_expr (&(pool
->literals
[entry
]),
3694 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3697 /* Mark the pool as empty. */
3698 pool
->next_free_entry
= 0;
3699 pool
->symbol
= NULL
;
3703 /* Forward declarations for functions below, in the MD interface
3705 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3706 static valueT
create_unwind_entry (int);
3707 static void start_unwind_section (const segT
, int);
3708 static void add_unwind_opcode (valueT
, int);
3709 static void flush_pending_unwind (void);
3711 /* Directives: Data. */
3714 s_arm_elf_cons (int nbytes
)
3718 #ifdef md_flush_pending_output
3719 md_flush_pending_output ();
3722 if (is_it_end_of_statement ())
3724 demand_empty_rest_of_line ();
3728 #ifdef md_cons_align
3729 md_cons_align (nbytes
);
3732 mapping_state (MAP_DATA
);
3736 char *base
= input_line_pointer
;
3740 if (exp
.X_op
!= O_symbol
)
3741 emit_expr (&exp
, (unsigned int) nbytes
);
3744 char *before_reloc
= input_line_pointer
;
3745 reloc
= parse_reloc (&input_line_pointer
);
3748 as_bad (_("unrecognized relocation suffix"));
3749 ignore_rest_of_line ();
3752 else if (reloc
== BFD_RELOC_UNUSED
)
3753 emit_expr (&exp
, (unsigned int) nbytes
);
3756 reloc_howto_type
*howto
= (reloc_howto_type
*)
3757 bfd_reloc_type_lookup (stdoutput
,
3758 (bfd_reloc_code_real_type
) reloc
);
3759 int size
= bfd_get_reloc_size (howto
);
3761 if (reloc
== BFD_RELOC_ARM_PLT32
)
3763 as_bad (_("(plt) is only valid on branch targets"));
3764 reloc
= BFD_RELOC_UNUSED
;
3769 as_bad (ngettext ("%s relocations do not fit in %d byte",
3770 "%s relocations do not fit in %d bytes",
3772 howto
->name
, nbytes
);
3775 /* We've parsed an expression stopping at O_symbol.
3776 But there may be more expression left now that we
3777 have parsed the relocation marker. Parse it again.
3778 XXX Surely there is a cleaner way to do this. */
3779 char *p
= input_line_pointer
;
3781 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3783 memcpy (save_buf
, base
, input_line_pointer
- base
);
3784 memmove (base
+ (input_line_pointer
- before_reloc
),
3785 base
, before_reloc
- base
);
3787 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3789 memcpy (base
, save_buf
, p
- base
);
3791 offset
= nbytes
- size
;
3792 p
= frag_more (nbytes
);
3793 memset (p
, 0, nbytes
);
3794 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3795 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3801 while (*input_line_pointer
++ == ',');
3803 /* Put terminator back into stream. */
3804 input_line_pointer
--;
3805 demand_empty_rest_of_line ();
3808 /* Emit an expression containing a 32-bit thumb instruction.
3809 Implementation based on put_thumb32_insn. */
3812 emit_thumb32_expr (expressionS
* exp
)
3814 expressionS exp_high
= *exp
;
3816 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3817 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3818 exp
->X_add_number
&= 0xffff;
3819 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit opcode, 4 for a 32-bit opcode, and 0 when
   the width cannot be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3836 emit_insn (expressionS
*exp
, int nbytes
)
3840 if (exp
->X_op
== O_constant
)
3845 size
= thumb_insn_size (exp
->X_add_number
);
3849 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3851 as_bad (_(".inst.n operand too big. "\
3852 "Use .inst.w instead"));
3857 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3858 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3860 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3862 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3863 emit_thumb32_expr (exp
);
3865 emit_expr (exp
, (unsigned int) size
);
3867 it_fsm_post_encode ();
3871 as_bad (_("cannot determine Thumb instruction size. " \
3872 "Use .inst.n/.inst.w instead"));
3875 as_bad (_("constant expression required"));
3880 /* Like s_arm_elf_cons but do not use md_cons_align and
3881 set the mapping state to MAP_ARM/MAP_THUMB. */
3884 s_arm_elf_inst (int nbytes
)
3886 if (is_it_end_of_statement ())
3888 demand_empty_rest_of_line ();
3892 /* Calling mapping_state () here will not change ARM/THUMB,
3893 but will ensure not to be in DATA state. */
3896 mapping_state (MAP_THUMB
);
3901 as_bad (_("width suffixes are invalid in ARM mode"));
3902 ignore_rest_of_line ();
3908 mapping_state (MAP_ARM
);
3917 if (! emit_insn (& exp
, nbytes
))
3919 ignore_rest_of_line ();
3923 while (*input_line_pointer
++ == ',');
3925 /* Put terminator back into stream. */
3926 input_line_pointer
--;
3927 demand_empty_rest_of_line ();
3930 /* Parse a .rel31 directive. */
3933 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3940 if (*input_line_pointer
== '1')
3941 highbit
= 0x80000000;
3942 else if (*input_line_pointer
!= '0')
3943 as_bad (_("expected 0 or 1"));
3945 input_line_pointer
++;
3946 if (*input_line_pointer
!= ',')
3947 as_bad (_("missing comma"));
3948 input_line_pointer
++;
3950 #ifdef md_flush_pending_output
3951 md_flush_pending_output ();
3954 #ifdef md_cons_align
3958 mapping_state (MAP_DATA
);
3963 md_number_to_chars (p
, highbit
, 4);
3964 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3965 BFD_RELOC_ARM_PREL31
);
3967 demand_empty_rest_of_line ();
3970 /* Directives: AEABI stack-unwind tables. */
3972 /* Parse an unwind_fnstart directive. Simply records the current location. */
3975 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3977 demand_empty_rest_of_line ();
3978 if (unwind
.proc_start
)
3980 as_bad (_("duplicate .fnstart directive"));
3984 /* Mark the start of the function. */
3985 unwind
.proc_start
= expr_build_dot ();
3987 /* Reset the rest of the unwind info. */
3988 unwind
.opcode_count
= 0;
3989 unwind
.table_entry
= NULL
;
3990 unwind
.personality_routine
= NULL
;
3991 unwind
.personality_index
= -1;
3992 unwind
.frame_size
= 0;
3993 unwind
.fp_offset
= 0;
3994 unwind
.fp_reg
= REG_SP
;
3996 unwind
.sp_restored
= 0;
4000 /* Parse a handlerdata directive. Creates the exception handling table entry
4001 for the function. */
4004 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
4006 demand_empty_rest_of_line ();
4007 if (!unwind
.proc_start
)
4008 as_bad (MISSING_FNSTART
);
4010 if (unwind
.table_entry
)
4011 as_bad (_("duplicate .handlerdata directive"));
4013 create_unwind_entry (1);
4016 /* Parse an unwind_fnend directive. Generates the index table entry. */
4019 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4024 unsigned int marked_pr_dependency
;
4026 demand_empty_rest_of_line ();
4028 if (!unwind
.proc_start
)
4030 as_bad (_(".fnend directive without .fnstart"));
4034 /* Add eh table entry. */
4035 if (unwind
.table_entry
== NULL
)
4036 val
= create_unwind_entry (0);
4040 /* Add index table entry. This is two words. */
4041 start_unwind_section (unwind
.saved_seg
, 1);
4042 frag_align (2, 0, 0);
4043 record_alignment (now_seg
, 2);
4045 ptr
= frag_more (8);
4047 where
= frag_now_fix () - 8;
4049 /* Self relative offset of the function start. */
4050 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4051 BFD_RELOC_ARM_PREL31
);
4053 /* Indicate dependency on EHABI-defined personality routines to the
4054 linker, if it hasn't been done already. */
4055 marked_pr_dependency
4056 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4057 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4058 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4060 static const char *const name
[] =
4062 "__aeabi_unwind_cpp_pr0",
4063 "__aeabi_unwind_cpp_pr1",
4064 "__aeabi_unwind_cpp_pr2"
4066 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4067 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4068 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4069 |= 1 << unwind
.personality_index
;
4073 /* Inline exception table entry. */
4074 md_number_to_chars (ptr
+ 4, val
, 4);
4076 /* Self relative offset of the table entry. */
4077 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4078 BFD_RELOC_ARM_PREL31
);
4080 /* Restore the original section. */
4081 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4083 unwind
.proc_start
= NULL
;
4087 /* Parse an unwind_cantunwind directive. */
4090 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4092 demand_empty_rest_of_line ();
4093 if (!unwind
.proc_start
)
4094 as_bad (MISSING_FNSTART
);
4096 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4097 as_bad (_("personality routine specified for cantunwind frame"));
4099 unwind
.personality_index
= -2;
4103 /* Parse a personalityindex directive. */
4106 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4110 if (!unwind
.proc_start
)
4111 as_bad (MISSING_FNSTART
);
4113 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4114 as_bad (_("duplicate .personalityindex directive"));
4118 if (exp
.X_op
!= O_constant
4119 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4121 as_bad (_("bad personality routine number"));
4122 ignore_rest_of_line ();
4126 unwind
.personality_index
= exp
.X_add_number
;
4128 demand_empty_rest_of_line ();
4132 /* Parse a personality directive. */
4135 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4139 if (!unwind
.proc_start
)
4140 as_bad (MISSING_FNSTART
);
4142 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4143 as_bad (_("duplicate .personality directive"));
4145 c
= get_symbol_name (& name
);
4146 p
= input_line_pointer
;
4148 ++ input_line_pointer
;
4149 unwind
.personality_routine
= symbol_find_or_make (name
);
4151 demand_empty_rest_of_line ();
4155 /* Parse a directive saving core registers. */
4158 s_arm_unwind_save_core (void)
4164 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4167 as_bad (_("expected register list"));
4168 ignore_rest_of_line ();
4172 demand_empty_rest_of_line ();
4174 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4175 into .unwind_save {..., sp...}. We aren't bothered about the value of
4176 ip because it is clobbered by calls. */
4177 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4178 && (range
& 0x3000) == 0x1000)
4180 unwind
.opcode_count
--;
4181 unwind
.sp_restored
= 0;
4182 range
= (range
| 0x2000) & ~0x1000;
4183 unwind
.pending_offset
= 0;
4189 /* See if we can use the short opcodes. These pop a block of up to 8
4190 registers starting with r4, plus maybe r14. */
4191 for (n
= 0; n
< 8; n
++)
4193 /* Break at the first non-saved register. */
4194 if ((range
& (1 << (n
+ 4))) == 0)
4197 /* See if there are any other bits set. */
4198 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4200 /* Use the long form. */
4201 op
= 0x8000 | ((range
>> 4) & 0xfff);
4202 add_unwind_opcode (op
, 2);
4206 /* Use the short form. */
4208 op
= 0xa8; /* Pop r14. */
4210 op
= 0xa0; /* Do not pop r14. */
4212 add_unwind_opcode (op
, 1);
4219 op
= 0xb100 | (range
& 0xf);
4220 add_unwind_opcode (op
, 2);
4223 /* Record the number of bytes pushed. */
4224 for (n
= 0; n
< 16; n
++)
4226 if (range
& (1 << n
))
4227 unwind
.frame_size
+= 4;
4232 /* Parse a directive saving FPA registers. */
4235 s_arm_unwind_save_fpa (int reg
)
4241 /* Get Number of registers to transfer. */
4242 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4245 exp
.X_op
= O_illegal
;
4247 if (exp
.X_op
!= O_constant
)
4249 as_bad (_("expected , <constant>"));
4250 ignore_rest_of_line ();
4254 num_regs
= exp
.X_add_number
;
4256 if (num_regs
< 1 || num_regs
> 4)
4258 as_bad (_("number of registers must be in the range [1:4]"));
4259 ignore_rest_of_line ();
4263 demand_empty_rest_of_line ();
4268 op
= 0xb4 | (num_regs
- 1);
4269 add_unwind_opcode (op
, 1);
4274 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4275 add_unwind_opcode (op
, 2);
4277 unwind
.frame_size
+= num_regs
* 12;
4281 /* Parse a directive saving VFP registers for ARMv6 and above. */
4284 s_arm_unwind_save_vfp_armv6 (void)
4289 int num_vfpv3_regs
= 0;
4290 int num_regs_below_16
;
4291 bfd_boolean partial_match
;
4293 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4297 as_bad (_("expected register list"));
4298 ignore_rest_of_line ();
4302 demand_empty_rest_of_line ();
4304 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4305 than FSTMX/FLDMX-style ones). */
4307 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4309 num_vfpv3_regs
= count
;
4310 else if (start
+ count
> 16)
4311 num_vfpv3_regs
= start
+ count
- 16;
4313 if (num_vfpv3_regs
> 0)
4315 int start_offset
= start
> 16 ? start
- 16 : 0;
4316 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4317 add_unwind_opcode (op
, 2);
4320 /* Generate opcode for registers numbered in the range 0 .. 15. */
4321 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4322 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4323 if (num_regs_below_16
> 0)
4325 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4326 add_unwind_opcode (op
, 2);
4329 unwind
.frame_size
+= count
* 8;
4333 /* Parse a directive saving VFP registers for pre-ARMv6. */
4336 s_arm_unwind_save_vfp (void)
4341 bfd_boolean partial_match
;
4343 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4347 as_bad (_("expected register list"));
4348 ignore_rest_of_line ();
4352 demand_empty_rest_of_line ();
4357 op
= 0xb8 | (count
- 1);
4358 add_unwind_opcode (op
, 1);
4363 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4364 add_unwind_opcode (op
, 2);
4366 unwind
.frame_size
+= count
* 8 + 4;
4370 /* Parse a directive saving iWMMXt data registers. */
4373 s_arm_unwind_save_mmxwr (void)
4381 if (*input_line_pointer
== '{')
4382 input_line_pointer
++;
4386 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4390 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4395 as_tsktsk (_("register list not in ascending order"));
4398 if (*input_line_pointer
== '-')
4400 input_line_pointer
++;
4401 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4404 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4407 else if (reg
>= hi_reg
)
4409 as_bad (_("bad register range"));
4412 for (; reg
< hi_reg
; reg
++)
4416 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4418 skip_past_char (&input_line_pointer
, '}');
4420 demand_empty_rest_of_line ();
4422 /* Generate any deferred opcodes because we're going to be looking at
4424 flush_pending_unwind ();
4426 for (i
= 0; i
< 16; i
++)
4428 if (mask
& (1 << i
))
4429 unwind
.frame_size
+= 8;
4432 /* Attempt to combine with a previous opcode. We do this because gcc
4433 likes to output separate unwind directives for a single block of
4435 if (unwind
.opcode_count
> 0)
4437 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4438 if ((i
& 0xf8) == 0xc0)
4441 /* Only merge if the blocks are contiguous. */
4444 if ((mask
& 0xfe00) == (1 << 9))
4446 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4447 unwind
.opcode_count
--;
4450 else if (i
== 6 && unwind
.opcode_count
>= 2)
4452 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4456 op
= 0xffff << (reg
- 1);
4458 && ((mask
& op
) == (1u << (reg
- 1))))
4460 op
= (1 << (reg
+ i
+ 1)) - 1;
4461 op
&= ~((1 << reg
) - 1);
4463 unwind
.opcode_count
-= 2;
4470 /* We want to generate opcodes in the order the registers have been
4471 saved, ie. descending order. */
4472 for (reg
= 15; reg
>= -1; reg
--)
4474 /* Save registers in blocks. */
4476 || !(mask
& (1 << reg
)))
4478 /* We found an unsaved reg. Generate opcodes to save the
4485 op
= 0xc0 | (hi_reg
- 10);
4486 add_unwind_opcode (op
, 1);
4491 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4492 add_unwind_opcode (op
, 2);
4501 ignore_rest_of_line ();
4505 s_arm_unwind_save_mmxwcg (void)
4512 if (*input_line_pointer
== '{')
4513 input_line_pointer
++;
4515 skip_whitespace (input_line_pointer
);
4519 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4523 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4529 as_tsktsk (_("register list not in ascending order"));
4532 if (*input_line_pointer
== '-')
4534 input_line_pointer
++;
4535 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4538 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4541 else if (reg
>= hi_reg
)
4543 as_bad (_("bad register range"));
4546 for (; reg
< hi_reg
; reg
++)
4550 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4552 skip_past_char (&input_line_pointer
, '}');
4554 demand_empty_rest_of_line ();
4556 /* Generate any deferred opcodes because we're going to be looking at
4558 flush_pending_unwind ();
4560 for (reg
= 0; reg
< 16; reg
++)
4562 if (mask
& (1 << reg
))
4563 unwind
.frame_size
+= 4;
4566 add_unwind_opcode (op
, 2);
4569 ignore_rest_of_line ();
4573 /* Parse an unwind_save directive.
4574 If the argument is non-zero, this is a .vsave directive. */
4577 s_arm_unwind_save (int arch_v6
)
4580 struct reg_entry
*reg
;
4581 bfd_boolean had_brace
= FALSE
;
4583 if (!unwind
.proc_start
)
4584 as_bad (MISSING_FNSTART
);
4586 /* Figure out what sort of save we have. */
4587 peek
= input_line_pointer
;
4595 reg
= arm_reg_parse_multi (&peek
);
4599 as_bad (_("register expected"));
4600 ignore_rest_of_line ();
4609 as_bad (_("FPA .unwind_save does not take a register list"));
4610 ignore_rest_of_line ();
4613 input_line_pointer
= peek
;
4614 s_arm_unwind_save_fpa (reg
->number
);
4618 s_arm_unwind_save_core ();
4623 s_arm_unwind_save_vfp_armv6 ();
4625 s_arm_unwind_save_vfp ();
4628 case REG_TYPE_MMXWR
:
4629 s_arm_unwind_save_mmxwr ();
4632 case REG_TYPE_MMXWCG
:
4633 s_arm_unwind_save_mmxwcg ();
4637 as_bad (_(".unwind_save does not support this kind of register"));
4638 ignore_rest_of_line ();
4643 /* Parse an unwind_movsp directive. */
4646 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4652 if (!unwind
.proc_start
)
4653 as_bad (MISSING_FNSTART
);
4655 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4658 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4659 ignore_rest_of_line ();
4663 /* Optional constant. */
4664 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4666 if (immediate_for_directive (&offset
) == FAIL
)
4672 demand_empty_rest_of_line ();
4674 if (reg
== REG_SP
|| reg
== REG_PC
)
4676 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4680 if (unwind
.fp_reg
!= REG_SP
)
4681 as_bad (_("unexpected .unwind_movsp directive"));
4683 /* Generate opcode to restore the value. */
4685 add_unwind_opcode (op
, 1);
4687 /* Record the information for later. */
4688 unwind
.fp_reg
= reg
;
4689 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4690 unwind
.sp_restored
= 1;
4693 /* Parse an unwind_pad directive. */
4696 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4700 if (!unwind
.proc_start
)
4701 as_bad (MISSING_FNSTART
);
4703 if (immediate_for_directive (&offset
) == FAIL
)
4708 as_bad (_("stack increment must be multiple of 4"));
4709 ignore_rest_of_line ();
4713 /* Don't generate any opcodes, just record the details for later. */
4714 unwind
.frame_size
+= offset
;
4715 unwind
.pending_offset
+= offset
;
4717 demand_empty_rest_of_line ();
4720 /* Parse an unwind_setfp directive. */
4723 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4729 if (!unwind
.proc_start
)
4730 as_bad (MISSING_FNSTART
);
4732 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4733 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4736 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4738 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4740 as_bad (_("expected <reg>, <reg>"));
4741 ignore_rest_of_line ();
4745 /* Optional constant. */
4746 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4748 if (immediate_for_directive (&offset
) == FAIL
)
4754 demand_empty_rest_of_line ();
4756 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4758 as_bad (_("register must be either sp or set by a previous"
4759 "unwind_movsp directive"));
4763 /* Don't generate any opcodes, just record the information for later. */
4764 unwind
.fp_reg
= fp_reg
;
4766 if (sp_reg
== REG_SP
)
4767 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4769 unwind
.fp_offset
-= offset
;
4772 /* Parse an unwind_raw directive. */
4775 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4778 /* This is an arbitrary limit. */
4779 unsigned char op
[16];
4782 if (!unwind
.proc_start
)
4783 as_bad (MISSING_FNSTART
);
4786 if (exp
.X_op
== O_constant
4787 && skip_past_comma (&input_line_pointer
) != FAIL
)
4789 unwind
.frame_size
+= exp
.X_add_number
;
4793 exp
.X_op
= O_illegal
;
4795 if (exp
.X_op
!= O_constant
)
4797 as_bad (_("expected <offset>, <opcode>"));
4798 ignore_rest_of_line ();
4804 /* Parse the opcode. */
4809 as_bad (_("unwind opcode too long"));
4810 ignore_rest_of_line ();
4812 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4814 as_bad (_("invalid unwind opcode"));
4815 ignore_rest_of_line ();
4818 op
[count
++] = exp
.X_add_number
;
4820 /* Parse the next byte. */
4821 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4827 /* Add the opcode bytes in reverse order. */
4829 add_unwind_opcode (op
[count
], 1);
4831 demand_empty_rest_of_line ();
4835 /* Parse a .eabi_attribute directive. */
4838 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4840 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4842 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4843 attributes_set_explicitly
[tag
] = 1;
4846 /* Emit a tls fix for the symbol. */
4849 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4853 #ifdef md_flush_pending_output
4854 md_flush_pending_output ();
4857 #ifdef md_cons_align
4861 /* Since we're just labelling the code, there's no need to define a
4864 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4865 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4866 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4867 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4869 #endif /* OBJ_ELF */
4871 static void s_arm_arch (int);
4872 static void s_arm_object_arch (int);
4873 static void s_arm_cpu (int);
4874 static void s_arm_fpu (int);
4875 static void s_arm_arch_extension (int);
4880 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4887 if (exp
.X_op
== O_symbol
)
4888 exp
.X_op
= O_secrel
;
4890 emit_expr (&exp
, 4);
4892 while (*input_line_pointer
++ == ',');
4894 input_line_pointer
--;
4895 demand_empty_rest_of_line ();
4899 /* This table describes all the machine specific pseudo-ops the assembler
4900 has to support. The fields are:
4901 pseudo-op name without dot
4902 function to call to execute this pseudo-op
4903 Integer arg to pass to the function. */
4905 const pseudo_typeS md_pseudo_table
[] =
4907 /* Never called because '.req' does not start a line. */
4908 { "req", s_req
, 0 },
4909 /* Following two are likewise never called. */
4912 { "unreq", s_unreq
, 0 },
4913 { "bss", s_bss
, 0 },
4914 { "align", s_align_ptwo
, 2 },
4915 { "arm", s_arm
, 0 },
4916 { "thumb", s_thumb
, 0 },
4917 { "code", s_code
, 0 },
4918 { "force_thumb", s_force_thumb
, 0 },
4919 { "thumb_func", s_thumb_func
, 0 },
4920 { "thumb_set", s_thumb_set
, 0 },
4921 { "even", s_even
, 0 },
4922 { "ltorg", s_ltorg
, 0 },
4923 { "pool", s_ltorg
, 0 },
4924 { "syntax", s_syntax
, 0 },
4925 { "cpu", s_arm_cpu
, 0 },
4926 { "arch", s_arm_arch
, 0 },
4927 { "object_arch", s_arm_object_arch
, 0 },
4928 { "fpu", s_arm_fpu
, 0 },
4929 { "arch_extension", s_arm_arch_extension
, 0 },
4931 { "word", s_arm_elf_cons
, 4 },
4932 { "long", s_arm_elf_cons
, 4 },
4933 { "inst.n", s_arm_elf_inst
, 2 },
4934 { "inst.w", s_arm_elf_inst
, 4 },
4935 { "inst", s_arm_elf_inst
, 0 },
4936 { "rel31", s_arm_rel31
, 0 },
4937 { "fnstart", s_arm_unwind_fnstart
, 0 },
4938 { "fnend", s_arm_unwind_fnend
, 0 },
4939 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4940 { "personality", s_arm_unwind_personality
, 0 },
4941 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4942 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4943 { "save", s_arm_unwind_save
, 0 },
4944 { "vsave", s_arm_unwind_save
, 1 },
4945 { "movsp", s_arm_unwind_movsp
, 0 },
4946 { "pad", s_arm_unwind_pad
, 0 },
4947 { "setfp", s_arm_unwind_setfp
, 0 },
4948 { "unwind_raw", s_arm_unwind_raw
, 0 },
4949 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4950 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4954 /* These are used for dwarf. */
4958 /* These are used for dwarf2. */
4959 { "file", dwarf2_directive_file
, 0 },
4960 { "loc", dwarf2_directive_loc
, 0 },
4961 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4963 { "extend", float_cons
, 'x' },
4964 { "ldouble", float_cons
, 'x' },
4965 { "packed", float_cons
, 'p' },
4967 {"secrel32", pe_directive_secrel
, 0},
4970 /* These are for compatibility with CodeComposer Studio. */
4971 {"ref", s_ccs_ref
, 0},
4972 {"def", s_ccs_def
, 0},
4973 {"asmfunc", s_ccs_asmfunc
, 0},
4974 {"endasmfunc", s_ccs_endasmfunc
, 0},
4979 /* Parser functions used exclusively in instruction operands. */
4981 /* Generic immediate-value read function for use in insn parsing.
4982 STR points to the beginning of the immediate (the leading #);
4983 VAL receives the value; if the value is outside [MIN, MAX]
4984 issue an error. PREFIX_OPT is true if the immediate prefix is
4988 parse_immediate (char **str
, int *val
, int min
, int max
,
4989 bfd_boolean prefix_opt
)
4993 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4994 if (exp
.X_op
!= O_constant
)
4996 inst
.error
= _("constant expression required");
5000 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
5002 inst
.error
= _("immediate value out of range");
5006 *val
= exp
.X_add_number
;
5010 /* Less-generic immediate-value read function with the possibility of loading a
5011 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5012 instructions. Puts the result directly in inst.operands[i]. */
5015 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
5016 bfd_boolean allow_symbol_p
)
5019 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5022 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5024 if (exp_p
->X_op
== O_constant
)
5026 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5027 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5028 O_constant. We have to be careful not to break compilation for
5029 32-bit X_add_number, though. */
5030 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5032 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5033 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5035 inst
.operands
[i
].regisimm
= 1;
5038 else if (exp_p
->X_op
== O_big
5039 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5041 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5043 /* Bignums have their least significant bits in
5044 generic_bignum[0]. Make sure we put 32 bits in imm and
5045 32 bits in reg, in a (hopefully) portable way. */
5046 gas_assert (parts
!= 0);
5048 /* Make sure that the number is not too big.
5049 PR 11972: Bignums can now be sign-extended to the
5050 size of a .octa so check that the out of range bits
5051 are all zero or all one. */
5052 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5054 LITTLENUM_TYPE m
= -1;
5056 if (generic_bignum
[parts
* 2] != 0
5057 && generic_bignum
[parts
* 2] != m
)
5060 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5061 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5065 inst
.operands
[i
].imm
= 0;
5066 for (j
= 0; j
< parts
; j
++, idx
++)
5067 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
5068 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5069 inst
.operands
[i
].reg
= 0;
5070 for (j
= 0; j
< parts
; j
++, idx
++)
5071 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
5072 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5073 inst
.operands
[i
].regisimm
= 1;
5075 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5083 /* Returns the pseudo-register number of an FPA immediate constant,
5084 or FAIL if there isn't a valid constant here. */
5087 parse_fpa_immediate (char ** str
)
5089 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5095 /* First try and match exact strings, this is to guarantee
5096 that some formats will work even for cross assembly. */
5098 for (i
= 0; fp_const
[i
]; i
++)
5100 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5104 *str
+= strlen (fp_const
[i
]);
5105 if (is_end_of_line
[(unsigned char) **str
])
5111 /* Just because we didn't get a match doesn't mean that the constant
5112 isn't valid, just that it is in a format that we don't
5113 automatically recognize. Try parsing it with the standard
5114 expression routines. */
5116 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5118 /* Look for a raw floating point number. */
5119 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5120 && is_end_of_line
[(unsigned char) *save_in
])
5122 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5124 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5126 if (words
[j
] != fp_values
[i
][j
])
5130 if (j
== MAX_LITTLENUMS
)
5138 /* Try and parse a more complex expression, this will probably fail
5139 unless the code uses a floating point prefix (eg "0f"). */
5140 save_in
= input_line_pointer
;
5141 input_line_pointer
= *str
;
5142 if (expression (&exp
) == absolute_section
5143 && exp
.X_op
== O_big
5144 && exp
.X_add_number
< 0)
5146 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5148 #define X_PRECISION 5
5149 #define E_PRECISION 15L
5150 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5152 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5154 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5156 if (words
[j
] != fp_values
[i
][j
])
5160 if (j
== MAX_LITTLENUMS
)
5162 *str
= input_line_pointer
;
5163 input_line_pointer
= save_in
;
5170 *str
= input_line_pointer
;
5171 input_line_pointer
= save_in
;
5172 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* The 19 low-order bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 ("Bbbbbb") must be the complement-style extension of
     bit 29: 0b011111 when bit 29 is set, 0b100000 when it is clear.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
5187 /* Detect the presence of a floating point or integer zero constant,
5191 parse_ifimm_zero (char **in
)
5195 if (!is_immediate_prefix (**in
))
5197 /* In unified syntax, all prefixes are optional. */
5198 if (!unified_syntax
)
5204 /* Accept #0x0 as a synonym for #0. */
5205 if (strncmp (*in
, "0x", 2) == 0)
5208 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5213 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5214 &generic_floating_point_number
);
5217 && generic_floating_point_number
.sign
== '+'
5218 && (generic_floating_point_number
.low
5219 > generic_floating_point_number
.leader
))
5225 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5226 0baBbbbbbc defgh000 00000000 00000000.
5227 The zero and minus-zero cases need special handling, since they can't be
5228 encoded in the "quarter-precision" float format, but can nonetheless be
5229 loaded as integer constants. */
5232 parse_qfloat_immediate (char **ccp
, int *immed
)
5236 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5237 int found_fpchar
= 0;
5239 skip_past_char (&str
, '#');
5241 /* We must not accidentally parse an integer as a floating-point number. Make
5242 sure that the value we parse is not an integer by checking for special
5243 characters '.' or 'e'.
5244 FIXME: This is a horrible hack, but doing better is tricky because type
5245 information isn't in a very usable state at parse time. */
5247 skip_whitespace (fpnum
);
5249 if (strncmp (fpnum
, "0x", 2) == 0)
5253 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5254 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5264 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5266 unsigned fpword
= 0;
5269 /* Our FP word must be 32 bits (single-precision FP). */
5270 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5272 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5276 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5289 /* Shift operands. */
5292 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5295 struct asm_shift_name
5298 enum shift_kind kind
;
5301 /* Third argument to parse_shift. */
5302 enum parse_shift_mode
5304 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5305 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5306 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5307 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5308 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5311 /* Parse a <shift> specifier on an ARM data processing instruction.
5312 This has three forms:
5314 (LSL|LSR|ASL|ASR|ROR) Rs
5315 (LSL|LSR|ASL|ASR|ROR) #imm
5318 Note that ASL is assimilated to LSL in the instruction encoding, and
5319 RRX to ROR #0 (which cannot be written as such). */
5322 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5324 const struct asm_shift_name
*shift_name
;
5325 enum shift_kind shift
;
5330 for (p
= *str
; ISALPHA (*p
); p
++)
5335 inst
.error
= _("shift expression expected");
5339 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5342 if (shift_name
== NULL
)
5344 inst
.error
= _("shift expression expected");
5348 shift
= shift_name
->kind
;
5352 case NO_SHIFT_RESTRICT
:
5353 case SHIFT_IMMEDIATE
: break;
5355 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5356 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5358 inst
.error
= _("'LSL' or 'ASR' required");
5363 case SHIFT_LSL_IMMEDIATE
:
5364 if (shift
!= SHIFT_LSL
)
5366 inst
.error
= _("'LSL' required");
5371 case SHIFT_ASR_IMMEDIATE
:
5372 if (shift
!= SHIFT_ASR
)
5374 inst
.error
= _("'ASR' required");
5382 if (shift
!= SHIFT_RRX
)
5384 /* Whitespace can appear here if the next thing is a bare digit. */
5385 skip_whitespace (p
);
5387 if (mode
== NO_SHIFT_RESTRICT
5388 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5390 inst
.operands
[i
].imm
= reg
;
5391 inst
.operands
[i
].immisreg
= 1;
5393 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5396 inst
.operands
[i
].shift_kind
= shift
;
5397 inst
.operands
[i
].shifted
= 1;
5402 /* Parse a <shifter_operand> for an ARM data processing instruction:
5405 #<immediate>, <rotate>
5409 where <shift> is defined by parse_shift above, and <rotate> is a
5410 multiple of 2 between 0 and 30. Validation of immediate operands
5411 is deferred to md_apply_fix. */
5414 parse_shifter_operand (char **str
, int i
)
5419 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5421 inst
.operands
[i
].reg
= value
;
5422 inst
.operands
[i
].isreg
= 1;
5424 /* parse_shift will override this if appropriate */
5425 inst
.relocs
[0].exp
.X_op
= O_constant
;
5426 inst
.relocs
[0].exp
.X_add_number
= 0;
5428 if (skip_past_comma (str
) == FAIL
)
5431 /* Shift operation on register. */
5432 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5435 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5438 if (skip_past_comma (str
) == SUCCESS
)
5440 /* #x, y -- ie explicit rotation by Y. */
5441 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5444 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5446 inst
.error
= _("constant expression expected");
5450 value
= exp
.X_add_number
;
5451 if (value
< 0 || value
> 30 || value
% 2 != 0)
5453 inst
.error
= _("invalid rotation");
5456 if (inst
.relocs
[0].exp
.X_add_number
< 0
5457 || inst
.relocs
[0].exp
.X_add_number
> 255)
5459 inst
.error
= _("invalid constant");
5463 /* Encode as specified. */
5464 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5468 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5469 inst
.relocs
[0].pc_rel
= 0;
5473 /* Group relocation information. Each entry in the table contains the
5474 textual name of the relocation as may appear in assembler source
5475 and must end with a colon.
5476 Along with this textual name are the relocation codes to be used if
5477 the corresponding instruction is an ALU instruction (ADD or SUB only),
5478 an LDR, an LDRS, or an LDC. */
5480 struct group_reloc_table_entry
5491 /* Varieties of non-ALU group relocation. */
5499 static struct group_reloc_table_entry group_reloc_table
[] =
5500 { /* Program counter relative: */
5502 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5507 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5508 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5509 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5510 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5512 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5517 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5518 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5519 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5520 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5522 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5523 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5524 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5525 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5526 /* Section base relative */
5528 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5533 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5534 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5535 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5536 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5538 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5543 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5544 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5545 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5546 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5548 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5549 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5550 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5551 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5552 /* Absolute thumb alu relocations. */
5554 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5559 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5564 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5569 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5574 /* Given the address of a pointer pointing to the textual name of a group
5575 relocation as may appear in assembler source, attempt to find its details
5576 in group_reloc_table. The pointer will be updated to the character after
5577 the trailing colon. On failure, FAIL will be returned; SUCCESS
5578 otherwise. On success, *entry will be updated to point at the relevant
5579 group_reloc_table entry. */
5582 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5585 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5587 int length
= strlen (group_reloc_table
[i
].name
);
5589 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5590 && (*str
)[length
] == ':')
5592 *out
= &group_reloc_table
[i
];
5593 *str
+= (length
+ 1);
5601 /* Parse a <shifter_operand> for an ARM data processing instruction
5602 (as for parse_shifter_operand) where group relocations are allowed:
5605 #<immediate>, <rotate>
5606 #:<group_reloc>:<expression>
5610 where <group_reloc> is one of the strings defined in group_reloc_table.
5611 The hashes are optional.
5613 Everything else is as for parse_shifter_operand. */
5615 static parse_operand_result
5616 parse_shifter_operand_group_reloc (char **str
, int i
)
5618 /* Determine if we have the sequence of characters #: or just :
5619 coming next. If we do, then we check for a group relocation.
5620 If we don't, punt the whole lot to parse_shifter_operand. */
5622 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5623 || (*str
)[0] == ':')
5625 struct group_reloc_table_entry
*entry
;
5627 if ((*str
)[0] == '#')
5632 /* Try to parse a group relocation. Anything else is an error. */
5633 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5635 inst
.error
= _("unknown group relocation");
5636 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5639 /* We now have the group relocation table entry corresponding to
5640 the name in the assembler source. Next, we parse the expression. */
5641 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5642 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5644 /* Record the relocation type (always the ALU variant here). */
5645 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5646 gas_assert (inst
.relocs
[0].type
!= 0);
5648 return PARSE_OPERAND_SUCCESS
;
5651 return parse_shifter_operand (str
, i
) == SUCCESS
5652 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5654 /* Never reached. */
5657 /* Parse a Neon alignment expression. Information is written to
5658 inst.operands[i]. We assume the initial ':' has been skipped.
5660 align .imm = align << 8, .immisalign=1, .preind=0 */
5661 static parse_operand_result
5662 parse_neon_alignment (char **str
, int i
)
5667 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5669 if (exp
.X_op
!= O_constant
)
5671 inst
.error
= _("alignment must be constant");
5672 return PARSE_OPERAND_FAIL
;
5675 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5676 inst
.operands
[i
].immisalign
= 1;
5677 /* Alignments are not pre-indexes. */
5678 inst
.operands
[i
].preind
= 0;
5681 return PARSE_OPERAND_SUCCESS
;
5684 /* Parse all forms of an ARM address expression. Information is written
5685 to inst.operands[i] and/or inst.relocs[0].
5687 Preindexed addressing (.preind=1):
5689 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5690 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5691 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5692 .shift_kind=shift .relocs[0].exp=shift_imm
5694 These three may have a trailing ! which causes .writeback to be set also.
5696 Postindexed addressing (.postind=1, .writeback=1):
5698 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5699 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5700 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5701 .shift_kind=shift .relocs[0].exp=shift_imm
5703 Unindexed addressing (.preind=0, .postind=0):
5705 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5709 [Rn]{!} shorthand for [Rn,#0]{!}
5710 =immediate .isreg=0 .relocs[0].exp=immediate
5711 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5713 It is the caller's responsibility to check for addressing modes not
5714 supported by the instruction, and to set inst.relocs[0].type. */
5716 static parse_operand_result
5717 parse_address_main (char **str
, int i
, int group_relocations
,
5718 group_reloc_type group_type
)
5723 if (skip_past_char (&p
, '[') == FAIL
)
5725 if (skip_past_char (&p
, '=') == FAIL
)
5727 /* Bare address - translate to PC-relative offset. */
5728 inst
.relocs
[0].pc_rel
= 1;
5729 inst
.operands
[i
].reg
= REG_PC
;
5730 inst
.operands
[i
].isreg
= 1;
5731 inst
.operands
[i
].preind
= 1;
5733 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5734 return PARSE_OPERAND_FAIL
;
5736 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5737 /*allow_symbol_p=*/TRUE
))
5738 return PARSE_OPERAND_FAIL
;
5741 return PARSE_OPERAND_SUCCESS
;
5744 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5745 skip_whitespace (p
);
5747 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5749 if (group_type
== GROUP_MVE
)
5750 inst
.error
= BAD_ADDR_MODE
;
5752 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5753 return PARSE_OPERAND_FAIL
;
5755 inst
.operands
[i
].reg
= reg
;
5756 inst
.operands
[i
].isreg
= 1;
5758 if (skip_past_comma (&p
) == SUCCESS
)
5760 inst
.operands
[i
].preind
= 1;
5763 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5765 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5767 inst
.operands
[i
].imm
= reg
;
5768 inst
.operands
[i
].immisreg
= 1;
5770 if (skip_past_comma (&p
) == SUCCESS
)
5771 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5772 return PARSE_OPERAND_FAIL
;
5774 else if (skip_past_char (&p
, ':') == SUCCESS
)
5776 /* FIXME: '@' should be used here, but it's filtered out by generic
5777 code before we get to see it here. This may be subject to
5779 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5781 if (result
!= PARSE_OPERAND_SUCCESS
)
5786 if (inst
.operands
[i
].negative
)
5788 inst
.operands
[i
].negative
= 0;
5792 if (group_relocations
5793 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5795 struct group_reloc_table_entry
*entry
;
5797 /* Skip over the #: or : sequence. */
5803 /* Try to parse a group relocation. Anything else is an
5805 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5807 inst
.error
= _("unknown group relocation");
5808 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5811 /* We now have the group relocation table entry corresponding to
5812 the name in the assembler source. Next, we parse the
5814 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5815 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5817 /* Record the relocation type. */
5822 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5827 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5832 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5839 if (inst
.relocs
[0].type
== 0)
5841 inst
.error
= _("this group relocation is not allowed on this instruction");
5842 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5849 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5850 return PARSE_OPERAND_FAIL
;
5851 /* If the offset is 0, find out if it's a +0 or -0. */
5852 if (inst
.relocs
[0].exp
.X_op
== O_constant
5853 && inst
.relocs
[0].exp
.X_add_number
== 0)
5855 skip_whitespace (q
);
5859 skip_whitespace (q
);
5862 inst
.operands
[i
].negative
= 1;
5867 else if (skip_past_char (&p
, ':') == SUCCESS
)
5869 /* FIXME: '@' should be used here, but it's filtered out by generic code
5870 before we get to see it here. This may be subject to change. */
5871 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5873 if (result
!= PARSE_OPERAND_SUCCESS
)
5877 if (skip_past_char (&p
, ']') == FAIL
)
5879 inst
.error
= _("']' expected");
5880 return PARSE_OPERAND_FAIL
;
5883 if (skip_past_char (&p
, '!') == SUCCESS
)
5884 inst
.operands
[i
].writeback
= 1;
5886 else if (skip_past_comma (&p
) == SUCCESS
)
5888 if (skip_past_char (&p
, '{') == SUCCESS
)
5890 /* [Rn], {expr} - unindexed, with option */
5891 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5892 0, 255, TRUE
) == FAIL
)
5893 return PARSE_OPERAND_FAIL
;
5895 if (skip_past_char (&p
, '}') == FAIL
)
5897 inst
.error
= _("'}' expected at end of 'option' field");
5898 return PARSE_OPERAND_FAIL
;
5900 if (inst
.operands
[i
].preind
)
5902 inst
.error
= _("cannot combine index with option");
5903 return PARSE_OPERAND_FAIL
;
5906 return PARSE_OPERAND_SUCCESS
;
5910 inst
.operands
[i
].postind
= 1;
5911 inst
.operands
[i
].writeback
= 1;
5913 if (inst
.operands
[i
].preind
)
5915 inst
.error
= _("cannot combine pre- and post-indexing");
5916 return PARSE_OPERAND_FAIL
;
5920 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5922 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5924 /* We might be using the immediate for alignment already. If we
5925 are, OR the register number into the low-order bits. */
5926 if (inst
.operands
[i
].immisalign
)
5927 inst
.operands
[i
].imm
|= reg
;
5929 inst
.operands
[i
].imm
= reg
;
5930 inst
.operands
[i
].immisreg
= 1;
5932 if (skip_past_comma (&p
) == SUCCESS
)
5933 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5934 return PARSE_OPERAND_FAIL
;
5940 if (inst
.operands
[i
].negative
)
5942 inst
.operands
[i
].negative
= 0;
5945 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5946 return PARSE_OPERAND_FAIL
;
5947 /* If the offset is 0, find out if it's a +0 or -0. */
5948 if (inst
.relocs
[0].exp
.X_op
== O_constant
5949 && inst
.relocs
[0].exp
.X_add_number
== 0)
5951 skip_whitespace (q
);
5955 skip_whitespace (q
);
5958 inst
.operands
[i
].negative
= 1;
5964 /* If at this point neither .preind nor .postind is set, we have a
5965 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5966 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5968 inst
.operands
[i
].preind
= 1;
5969 inst
.relocs
[0].exp
.X_op
= O_constant
;
5970 inst
.relocs
[0].exp
.X_add_number
= 0;
5973 return PARSE_OPERAND_SUCCESS
;
5977 parse_address (char **str
, int i
)
5979 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5983 static parse_operand_result
5984 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5986 return parse_address_main (str
, i
, 1, type
);
5989 /* Parse an operand for a MOVW or MOVT instruction. */
5991 parse_half (char **str
)
5996 skip_past_char (&p
, '#');
5997 if (strncasecmp (p
, ":lower16:", 9) == 0)
5998 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5999 else if (strncasecmp (p
, ":upper16:", 9) == 0)
6000 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
6002 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
6005 skip_whitespace (p
);
6008 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6011 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
6013 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
6015 inst
.error
= _("constant expression expected");
6018 if (inst
.relocs
[0].exp
.X_add_number
< 0
6019 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
6021 inst
.error
= _("immediate value out of range");
6029 /* Miscellaneous. */
6031 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6032 or a bitmask suitable to be or-ed into the ARM msr instruction. */
6034 parse_psr (char **str
, bfd_boolean lhs
)
6037 unsigned long psr_field
;
6038 const struct asm_psr
*psr
;
6040 bfd_boolean is_apsr
= FALSE
;
6041 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
6043 /* PR gas/12698: If the user has specified -march=all then m_profile will
6044 be TRUE, but we want to ignore it in this case as we are building for any
6045 CPU type, including non-m variants. */
6046 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
6049 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
6050 feature for ease of use and backwards compatibility. */
6052 if (strncasecmp (p
, "SPSR", 4) == 0)
6055 goto unsupported_psr
;
6057 psr_field
= SPSR_BIT
;
6059 else if (strncasecmp (p
, "CPSR", 4) == 0)
6062 goto unsupported_psr
;
6066 else if (strncasecmp (p
, "APSR", 4) == 0)
6068 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
6069 and ARMv7-R architecture CPUs. */
6078 while (ISALNUM (*p
) || *p
== '_');
6080 if (strncasecmp (start
, "iapsr", 5) == 0
6081 || strncasecmp (start
, "eapsr", 5) == 0
6082 || strncasecmp (start
, "xpsr", 4) == 0
6083 || strncasecmp (start
, "psr", 3) == 0)
6084 p
= start
+ strcspn (start
, "rR") + 1;
6086 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
6092 /* If APSR is being written, a bitfield may be specified. Note that
6093 APSR itself is handled above. */
6094 if (psr
->field
<= 3)
6096 psr_field
= psr
->field
;
6102 /* M-profile MSR instructions have the mask field set to "10", except
6103 *PSR variants which modify APSR, which may use a different mask (and
6104 have been handled already). Do that by setting the PSR_f field
6106 return psr
->field
| (lhs
? PSR_f
: 0);
6109 goto unsupported_psr
;
6115 /* A suffix follows. */
6121 while (ISALNUM (*p
) || *p
== '_');
6125 /* APSR uses a notation for bits, rather than fields. */
6126 unsigned int nzcvq_bits
= 0;
6127 unsigned int g_bit
= 0;
6130 for (bit
= start
; bit
!= p
; bit
++)
6132 switch (TOLOWER (*bit
))
6135 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6139 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6143 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6147 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6151 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6155 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6159 inst
.error
= _("unexpected bit specified after APSR");
6164 if (nzcvq_bits
== 0x1f)
6169 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6171 inst
.error
= _("selected processor does not "
6172 "support DSP extension");
6179 if ((nzcvq_bits
& 0x20) != 0
6180 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6181 || (g_bit
& 0x2) != 0)
6183 inst
.error
= _("bad bitmask specified after APSR");
6189 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6194 psr_field
|= psr
->field
;
6200 goto error
; /* Garbage after "[CS]PSR". */
6202 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6203 is deprecated, but allow it anyway. */
6207 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6210 else if (!m_profile
)
6211 /* These bits are never right for M-profile devices: don't set them
6212 (only code paths which read/write APSR reach here). */
6213 psr_field
|= (PSR_c
| PSR_f
);
6219 inst
.error
= _("selected processor does not support requested special "
6220 "purpose register");
6224 inst
.error
= _("flag for {c}psr instruction expected");
6229 parse_sys_vldr_vstr (char **str
)
6238 {"FPSCR", 0x1, 0x0},
6239 {"FPSCR_nzcvqc", 0x2, 0x0},
6242 {"FPCXTNS", 0x6, 0x1},
6243 {"FPCXTS", 0x7, 0x1}
6245 char *op_end
= strchr (*str
, ',');
6246 size_t op_strlen
= op_end
- *str
;
6248 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6250 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6252 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6261 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6262 value suitable for splatting into the AIF field of the instruction. */
6265 parse_cps_flags (char **str
)
6274 case '\0': case ',':
6277 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6278 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6279 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6282 inst
.error
= _("unrecognized CPS flag");
6287 if (saw_a_flag
== 0)
6289 inst
.error
= _("missing CPS flags");
6297 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6298 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6301 parse_endian_specifier (char **str
)
6306 if (strncasecmp (s
, "BE", 2))
6308 else if (strncasecmp (s
, "LE", 2))
6312 inst
.error
= _("valid endian specifiers are be or le");
6316 if (ISALNUM (s
[2]) || s
[2] == '_')
6318 inst
.error
= _("valid endian specifiers are be or le");
6323 return little_endian
;
6326 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6327 value suitable for poking into the rotate field of an sxt or sxta
6328 instruction, or FAIL on error. */
6331 parse_ror (char **str
)
6336 if (strncasecmp (s
, "ROR", 3) == 0)
6340 inst
.error
= _("missing rotation field after comma");
6344 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6349 case 0: *str
= s
; return 0x0;
6350 case 8: *str
= s
; return 0x1;
6351 case 16: *str
= s
; return 0x2;
6352 case 24: *str
= s
; return 0x3;
6355 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6360 /* Parse a conditional code (from conds[] below). The value returned is in the
6361 range 0 .. 14, or FAIL. */
6363 parse_cond (char **str
)
6366 const struct asm_cond
*c
;
6368 /* Condition codes are always 2 characters, so matching up to
6369 3 characters is sufficient. */
6374 while (ISALPHA (*q
) && n
< 3)
6376 cond
[n
] = TOLOWER (*q
);
6381 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6384 inst
.error
= _("condition required");
6392 /* Parse an option for a barrier instruction. Returns the encoding for the
6395 parse_barrier (char **str
)
6398 const struct asm_barrier_opt
*o
;
6401 while (ISALPHA (*q
))
6404 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6409 if (!mark_feature_used (&o
->arch
))
6416 /* Parse the operands of a table branch instruction. Similar to a memory
6419 parse_tb (char **str
)
6424 if (skip_past_char (&p
, '[') == FAIL
)
6426 inst
.error
= _("'[' expected");
6430 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6432 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6435 inst
.operands
[0].reg
= reg
;
6437 if (skip_past_comma (&p
) == FAIL
)
6439 inst
.error
= _("',' expected");
6443 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6445 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6448 inst
.operands
[0].imm
= reg
;
6450 if (skip_past_comma (&p
) == SUCCESS
)
6452 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6454 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6456 inst
.error
= _("invalid shift");
6459 inst
.operands
[0].shifted
= 1;
6462 if (skip_past_char (&p
, ']') == FAIL
)
6464 inst
.error
= _("']' expected");
6471 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6472 information on the types the operands can take and how they are encoded.
6473 Up to four operands may be read; this function handles setting the
6474 ".present" field for each read operand itself.
6475 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6476 else returns FAIL. */
6479 parse_neon_mov (char **str
, int *which_operand
)
6481 int i
= *which_operand
, val
;
6482 enum arm_reg_type rtype
;
6484 struct neon_type_el optype
;
6486 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6488 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6489 inst
.operands
[i
].reg
= val
;
6490 inst
.operands
[i
].isscalar
= 1;
6491 inst
.operands
[i
].vectype
= optype
;
6492 inst
.operands
[i
++].present
= 1;
6494 if (skip_past_comma (&ptr
) == FAIL
)
6497 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6500 inst
.operands
[i
].reg
= val
;
6501 inst
.operands
[i
].isreg
= 1;
6502 inst
.operands
[i
].present
= 1;
6504 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6507 /* Cases 0, 1, 2, 3, 5 (D only). */
6508 if (skip_past_comma (&ptr
) == FAIL
)
6511 inst
.operands
[i
].reg
= val
;
6512 inst
.operands
[i
].isreg
= 1;
6513 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6514 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6515 inst
.operands
[i
].isvec
= 1;
6516 inst
.operands
[i
].vectype
= optype
;
6517 inst
.operands
[i
++].present
= 1;
6519 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6521 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6522 Case 13: VMOV <Sd>, <Rm> */
6523 inst
.operands
[i
].reg
= val
;
6524 inst
.operands
[i
].isreg
= 1;
6525 inst
.operands
[i
].present
= 1;
6527 if (rtype
== REG_TYPE_NQ
)
6529 first_error (_("can't use Neon quad register here"));
6532 else if (rtype
!= REG_TYPE_VFS
)
6535 if (skip_past_comma (&ptr
) == FAIL
)
6537 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6539 inst
.operands
[i
].reg
= val
;
6540 inst
.operands
[i
].isreg
= 1;
6541 inst
.operands
[i
].present
= 1;
6544 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6547 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6548 Case 1: VMOV<c><q> <Dd>, <Dm>
6549 Case 8: VMOV.F32 <Sd>, <Sm>
6550 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6552 inst
.operands
[i
].reg
= val
;
6553 inst
.operands
[i
].isreg
= 1;
6554 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6555 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6556 inst
.operands
[i
].isvec
= 1;
6557 inst
.operands
[i
].vectype
= optype
;
6558 inst
.operands
[i
].present
= 1;
6560 if (skip_past_comma (&ptr
) == SUCCESS
)
6565 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6568 inst
.operands
[i
].reg
= val
;
6569 inst
.operands
[i
].isreg
= 1;
6570 inst
.operands
[i
++].present
= 1;
6572 if (skip_past_comma (&ptr
) == FAIL
)
6575 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6578 inst
.operands
[i
].reg
= val
;
6579 inst
.operands
[i
].isreg
= 1;
6580 inst
.operands
[i
].present
= 1;
6583 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6584 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6585 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6586 Case 10: VMOV.F32 <Sd>, #<imm>
6587 Case 11: VMOV.F64 <Dd>, #<imm> */
6588 inst
.operands
[i
].immisfloat
= 1;
6589 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6591 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6592 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6596 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6600 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6603 inst
.operands
[i
].reg
= val
;
6604 inst
.operands
[i
].isreg
= 1;
6605 inst
.operands
[i
++].present
= 1;
6607 if (skip_past_comma (&ptr
) == FAIL
)
6610 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6612 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6613 inst
.operands
[i
].reg
= val
;
6614 inst
.operands
[i
].isscalar
= 1;
6615 inst
.operands
[i
].present
= 1;
6616 inst
.operands
[i
].vectype
= optype
;
6618 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6620 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6621 inst
.operands
[i
].reg
= val
;
6622 inst
.operands
[i
].isreg
= 1;
6623 inst
.operands
[i
++].present
= 1;
6625 if (skip_past_comma (&ptr
) == FAIL
)
6628 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6631 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6635 inst
.operands
[i
].reg
= val
;
6636 inst
.operands
[i
].isreg
= 1;
6637 inst
.operands
[i
].isvec
= 1;
6638 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6639 inst
.operands
[i
].vectype
= optype
;
6640 inst
.operands
[i
].present
= 1;
6642 if (rtype
== REG_TYPE_VFS
)
6646 if (skip_past_comma (&ptr
) == FAIL
)
6648 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6651 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6654 inst
.operands
[i
].reg
= val
;
6655 inst
.operands
[i
].isreg
= 1;
6656 inst
.operands
[i
].isvec
= 1;
6657 inst
.operands
[i
].issingle
= 1;
6658 inst
.operands
[i
].vectype
= optype
;
6659 inst
.operands
[i
].present
= 1;
6662 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6666 inst
.operands
[i
].reg
= val
;
6667 inst
.operands
[i
].isreg
= 1;
6668 inst
.operands
[i
].isvec
= 1;
6669 inst
.operands
[i
].issingle
= 1;
6670 inst
.operands
[i
].vectype
= optype
;
6671 inst
.operands
[i
].present
= 1;
6676 first_error (_("parse error"));
6680 /* Successfully parsed the operands. Update args. */
6686 first_error (_("expected comma"));
6690 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code lives in the low
   16 bits, the Thumb code in the high 16 bits.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6699 /* Matcher codes for parse_operands. */
6700 enum operand_parse_code
6702 OP_stop
, /* end of line */
6704 OP_RR
, /* ARM register */
6705 OP_RRnpc
, /* ARM register, not r15 */
6706 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6707 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6708 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6709 optional trailing ! */
6710 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6711 OP_RCP
, /* Coprocessor number */
6712 OP_RCN
, /* Coprocessor register */
6713 OP_RF
, /* FPA register */
6714 OP_RVS
, /* VFP single precision register */
6715 OP_RVD
, /* VFP double precision register (0..15) */
6716 OP_RND
, /* Neon double precision register (0..31) */
6717 OP_RNDMQ
, /* Neon double precision (0..31) or MVE vector register. */
6718 OP_RNDMQR
, /* Neon double precision (0..31), MVE vector or ARM register.
6720 OP_RNQ
, /* Neon quad precision register */
6721 OP_RNQMQ
, /* Neon quad or MVE vector register. */
6722 OP_RVSD
, /* VFP single or double precision register */
6723 OP_RNSD
, /* Neon single or double precision register */
6724 OP_RNDQ
, /* Neon double or quad precision register */
6725 OP_RNDQMQ
, /* Neon double, quad or MVE vector register. */
6726 OP_RNSDQ
, /* Neon single, double or quad precision register */
6727 OP_RNSC
, /* Neon scalar D[X] */
6728 OP_RVC
, /* VFP control register */
6729 OP_RMF
, /* Maverick F register */
6730 OP_RMD
, /* Maverick D register */
6731 OP_RMFX
, /* Maverick FX register */
6732 OP_RMDX
, /* Maverick DX register */
6733 OP_RMAX
, /* Maverick AX register */
6734 OP_RMDS
, /* Maverick DSPSC register */
6735 OP_RIWR
, /* iWMMXt wR register */
6736 OP_RIWC
, /* iWMMXt wC register */
6737 OP_RIWG
, /* iWMMXt wCG register */
6738 OP_RXA
, /* XScale accumulator register */
6740 OP_RNSDQMQ
, /* Neon single, double or quad register or MVE vector register
6742 OP_RNSDQMQR
, /* Neon single, double or quad register, MVE vector register or
6744 OP_RMQ
, /* MVE vector register. */
6746 /* New operands for Armv8.1-M Mainline. */
6747 OP_LR
, /* ARM LR register */
6748 OP_RRe
, /* ARM register, only even numbered. */
6749 OP_RRo
, /* ARM register, only odd numbered, not r13 or r15. */
6750 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6752 OP_REGLST
, /* ARM register list */
6753 OP_CLRMLST
, /* CLRM register list */
6754 OP_VRSLST
, /* VFP single-precision register list */
6755 OP_VRDLST
, /* VFP double-precision register list */
6756 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6757 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6758 OP_NSTRLST
, /* Neon element/structure list */
6759 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
6760 OP_MSTRLST2
, /* MVE vector list with two elements. */
6761 OP_MSTRLST4
, /* MVE vector list with four elements. */
6763 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6764 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6765 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6766 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6767 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6768 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6769 OP_RNSDQ_RNSC_MQ
, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
6771 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6772 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6773 OP_VMOV
, /* Neon VMOV operands. */
6774 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6775 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6776 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6777 OP_VLDR
, /* VLDR operand. */
6779 OP_I0
, /* immediate zero */
6780 OP_I7
, /* immediate value 0 .. 7 */
6781 OP_I15
, /* 0 .. 15 */
6782 OP_I16
, /* 1 .. 16 */
6783 OP_I16z
, /* 0 .. 16 */
6784 OP_I31
, /* 0 .. 31 */
6785 OP_I31w
, /* 0 .. 31, optional trailing ! */
6786 OP_I32
, /* 1 .. 32 */
6787 OP_I32z
, /* 0 .. 32 */
6788 OP_I63
, /* 0 .. 63 */
6789 OP_I63s
, /* -64 .. 63 */
6790 OP_I64
, /* 1 .. 64 */
6791 OP_I64z
, /* 0 .. 64 */
6792 OP_I255
, /* 0 .. 255 */
6794 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6795 OP_I7b
, /* 0 .. 7 */
6796 OP_I15b
, /* 0 .. 15 */
6797 OP_I31b
, /* 0 .. 31 */
6799 OP_SH
, /* shifter operand */
6800 OP_SHG
, /* shifter operand with possible group relocation */
6801 OP_ADDR
, /* Memory address expression (any mode) */
6802 OP_ADDRMVE
, /* Memory address expression for MVE's VSTR/VLDR. */
6803 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6804 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6805 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6806 OP_EXP
, /* arbitrary expression */
6807 OP_EXPi
, /* same, with optional immediate prefix */
6808 OP_EXPr
, /* same, with optional relocation suffix */
6809 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6810 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6811 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6812 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6814 OP_CPSF
, /* CPS flags */
6815 OP_ENDI
, /* Endianness specifier */
6816 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6817 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6818 OP_COND
, /* conditional code */
6819 OP_TB
, /* Table branch. */
6821 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6823 OP_RRnpc_I0
, /* ARM register or literal 0 */
6824 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6825 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6826 OP_RF_IF
, /* FPA register or immediate */
6827 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6828 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6830 /* Optional operands. */
6831 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6832 OP_oI31b
, /* 0 .. 31 */
6833 OP_oI32b
, /* 1 .. 32 */
6834 OP_oI32z
, /* 0 .. 32 */
6835 OP_oIffffb
, /* 0 .. 65535 */
6836 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6838 OP_oRR
, /* ARM register */
6839 OP_oLR
, /* ARM LR register */
6840 OP_oRRnpc
, /* ARM register, not the PC */
6841 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6842 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6843 OP_oRND
, /* Optional Neon double precision register */
6844 OP_oRNQ
, /* Optional Neon quad precision register */
6845 OP_oRNDQMQ
, /* Optional Neon double, quad or MVE vector register. */
6846 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6847 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6848 OP_oRNSDQMQ
, /* Optional single, double or quad register or MVE vector
6850 OP_oSHll
, /* LSL immediate */
6851 OP_oSHar
, /* ASR immediate */
6852 OP_oSHllar
, /* LSL or ASR immediate */
6853 OP_oROR
, /* ROR 0/8/16/24 */
6854 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6856 /* Some pre-defined mixed (ARM/THUMB) operands. */
6857 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6858 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6859 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6861 OP_FIRST_OPTIONAL
= OP_oI7b
6864 /* Generic instruction operand parser. This does no encoding and no
6865 semantic validation; it merely squirrels values away in the inst
6866 structure. Returns SUCCESS or FAIL depending on whether the
6867 specified grammar matched. */
6869 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6871 unsigned const int *upat
= pattern
;
6872 char *backtrack_pos
= 0;
6873 const char *backtrack_error
= 0;
6874 int i
, val
= 0, backtrack_index
= 0;
6875 enum arm_reg_type rtype
;
6876 parse_operand_result result
;
6877 unsigned int op_parse_code
;
6878 bfd_boolean partial_match
;
6880 #define po_char_or_fail(chr) \
6883 if (skip_past_char (&str, chr) == FAIL) \
6888 #define po_reg_or_fail(regtype) \
6891 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6892 & inst.operands[i].vectype); \
6895 first_error (_(reg_expected_msgs[regtype])); \
6898 inst.operands[i].reg = val; \
6899 inst.operands[i].isreg = 1; \
6900 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6901 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6902 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6903 || rtype == REG_TYPE_VFD \
6904 || rtype == REG_TYPE_NQ); \
6908 #define po_reg_or_goto(regtype, label) \
6911 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6912 & inst.operands[i].vectype); \
6916 inst.operands[i].reg = val; \
6917 inst.operands[i].isreg = 1; \
6918 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6919 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6920 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6921 || rtype == REG_TYPE_VFD \
6922 || rtype == REG_TYPE_NQ); \
6926 #define po_imm_or_fail(min, max, popt) \
6929 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6931 inst.operands[i].imm = val; \
6935 #define po_scalar_or_goto(elsz, label) \
6938 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6941 inst.operands[i].reg = val; \
6942 inst.operands[i].isscalar = 1; \
6946 #define po_misc_or_fail(expr) \
6954 #define po_misc_or_fail_no_backtrack(expr) \
6958 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6959 backtrack_pos = 0; \
6960 if (result != PARSE_OPERAND_SUCCESS) \
6965 #define po_barrier_or_imm(str) \
6968 val = parse_barrier (&str); \
6969 if (val == FAIL && ! ISALPHA (*str)) \
6972 /* ISB can only take SY as an option. */ \
6973 || ((inst.instruction & 0xf0) == 0x60 \
6976 inst.error = _("invalid barrier type"); \
6977 backtrack_pos = 0; \
6983 skip_whitespace (str
);
6985 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6987 op_parse_code
= upat
[i
];
6988 if (op_parse_code
>= 1<<16)
6989 op_parse_code
= thumb
? (op_parse_code
>> 16)
6990 : (op_parse_code
& ((1<<16)-1));
6992 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6994 /* Remember where we are in case we need to backtrack. */
6995 gas_assert (!backtrack_pos
);
6996 backtrack_pos
= str
;
6997 backtrack_error
= inst
.error
;
6998 backtrack_index
= i
;
7001 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
7002 po_char_or_fail (',');
7004 switch (op_parse_code
)
7016 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
7017 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
7018 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
7019 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
7020 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
7021 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
7024 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
7028 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7031 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7033 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7035 /* Also accept generic coprocessor regs for unknown registers. */
7037 po_reg_or_fail (REG_TYPE_CN
);
7039 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7040 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7041 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7042 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7043 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7044 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7045 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7046 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7047 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7048 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7051 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7054 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7055 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7058 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7062 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7063 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7065 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7067 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7072 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7075 po_reg_or_fail (REG_TYPE_NSDQ
);
7079 po_reg_or_fail (REG_TYPE_MQ
);
7081 /* Neon scalar. Using an element size of 8 means that some invalid
7082 scalars are accepted here, so deal with those in later code. */
7083 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
7087 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7090 po_imm_or_fail (0, 0, TRUE
);
7095 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7100 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7103 if (parse_ifimm_zero (&str
))
7104 inst
.operands
[i
].imm
= 0;
7108 = _("only floating point zero is allowed as immediate value");
7116 po_scalar_or_goto (8, try_rr
);
7119 po_reg_or_fail (REG_TYPE_RN
);
7123 case OP_RNSDQ_RNSC_MQ
:
7124 po_reg_or_goto (REG_TYPE_MQ
, try_rnsdq_rnsc
);
7129 po_scalar_or_goto (8, try_nsdq
);
7132 po_reg_or_fail (REG_TYPE_NSDQ
);
7138 po_scalar_or_goto (8, try_s_scalar
);
7141 po_scalar_or_goto (4, try_nsd
);
7144 po_reg_or_fail (REG_TYPE_NSD
);
7150 po_scalar_or_goto (8, try_ndq
);
7153 po_reg_or_fail (REG_TYPE_NDQ
);
7159 po_scalar_or_goto (8, try_vfd
);
7162 po_reg_or_fail (REG_TYPE_VFD
);
7167 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7168 not careful then bad things might happen. */
7169 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7174 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7177 /* There's a possibility of getting a 64-bit immediate here, so
7178 we need special handling. */
7179 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7182 inst
.error
= _("immediate value is out of range");
7190 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7193 po_imm_or_fail (0, 63, TRUE
);
7198 po_char_or_fail ('[');
7199 po_reg_or_fail (REG_TYPE_RN
);
7200 po_char_or_fail (']');
7206 po_reg_or_fail (REG_TYPE_RN
);
7207 if (skip_past_char (&str
, '!') == SUCCESS
)
7208 inst
.operands
[i
].writeback
= 1;
7212 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7213 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7214 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7215 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7216 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7217 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7218 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7219 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7220 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7221 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7222 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7223 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7225 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7227 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7228 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7230 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7231 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7232 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7233 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7235 /* Immediate variants */
7237 po_char_or_fail ('{');
7238 po_imm_or_fail (0, 255, TRUE
);
7239 po_char_or_fail ('}');
7243 /* The expression parser chokes on a trailing !, so we have
7244 to find it first and zap it. */
7247 while (*s
&& *s
!= ',')
7252 inst
.operands
[i
].writeback
= 1;
7254 po_imm_or_fail (0, 31, TRUE
);
7262 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7267 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7272 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7274 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7276 val
= parse_reloc (&str
);
7279 inst
.error
= _("unrecognized relocation suffix");
7282 else if (val
!= BFD_RELOC_UNUSED
)
7284 inst
.operands
[i
].imm
= val
;
7285 inst
.operands
[i
].hasreloc
= 1;
7291 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7293 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7295 inst
.operands
[i
].hasreloc
= 1;
7297 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7299 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7300 inst
.operands
[i
].hasreloc
= 0;
7304 /* Operand for MOVW or MOVT. */
7306 po_misc_or_fail (parse_half (&str
));
7309 /* Register or expression. */
7310 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7311 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7313 /* Register or immediate. */
7314 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7315 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7317 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7319 if (!is_immediate_prefix (*str
))
7322 val
= parse_fpa_immediate (&str
);
7325 /* FPA immediates are encoded as registers 8-15.
7326 parse_fpa_immediate has already applied the offset. */
7327 inst
.operands
[i
].reg
= val
;
7328 inst
.operands
[i
].isreg
= 1;
7331 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7332 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7334 /* Two kinds of register. */
7337 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7339 || (rege
->type
!= REG_TYPE_MMXWR
7340 && rege
->type
!= REG_TYPE_MMXWC
7341 && rege
->type
!= REG_TYPE_MMXWCG
))
7343 inst
.error
= _("iWMMXt data or control register expected");
7346 inst
.operands
[i
].reg
= rege
->number
;
7347 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7353 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7355 || (rege
->type
!= REG_TYPE_MMXWC
7356 && rege
->type
!= REG_TYPE_MMXWCG
))
7358 inst
.error
= _("iWMMXt control register expected");
7361 inst
.operands
[i
].reg
= rege
->number
;
7362 inst
.operands
[i
].isreg
= 1;
7367 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7368 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7369 case OP_oROR
: val
= parse_ror (&str
); break;
7370 case OP_COND
: val
= parse_cond (&str
); break;
7371 case OP_oBARRIER_I15
:
7372 po_barrier_or_imm (str
); break;
7374 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7380 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7381 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7383 inst
.error
= _("Banked registers are not available with this "
7389 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7393 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7396 val
= parse_sys_vldr_vstr (&str
);
7400 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7403 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7405 if (strncasecmp (str
, "APSR_", 5) == 0)
7412 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7413 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7414 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7415 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7416 default: found
= 16;
7420 inst
.operands
[i
].isvec
= 1;
7421 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7422 inst
.operands
[i
].reg
= REG_PC
;
7429 po_misc_or_fail (parse_tb (&str
));
7432 /* Register lists. */
7434 val
= parse_reg_list (&str
, REGLIST_RN
);
7437 inst
.operands
[i
].writeback
= 1;
7443 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7447 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7452 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7457 /* Allow Q registers too. */
7458 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7459 REGLIST_NEON_D
, &partial_match
);
7463 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7464 REGLIST_VFP_S
, &partial_match
);
7465 inst
.operands
[i
].issingle
= 1;
7470 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7471 REGLIST_VFP_D_VPR
, &partial_match
);
7472 if (val
== FAIL
&& !partial_match
)
7475 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7476 REGLIST_VFP_S_VPR
, &partial_match
);
7477 inst
.operands
[i
].issingle
= 1;
7482 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7483 REGLIST_NEON_D
, &partial_match
);
7488 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7489 1, &inst
.operands
[i
].vectype
);
7490 if (val
!= (((op_parse_code
== OP_MSTRLST2
) ? 3 : 7) << 5 | 0xe))
7494 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7495 0, &inst
.operands
[i
].vectype
);
7498 /* Addressing modes */
7500 po_misc_or_fail (parse_address_group_reloc (&str
, i
, GROUP_MVE
));
7504 po_misc_or_fail (parse_address (&str
, i
));
7508 po_misc_or_fail_no_backtrack (
7509 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7513 po_misc_or_fail_no_backtrack (
7514 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7518 po_misc_or_fail_no_backtrack (
7519 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7523 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7527 po_misc_or_fail_no_backtrack (
7528 parse_shifter_operand_group_reloc (&str
, i
));
7532 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7536 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7540 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7544 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7547 /* Various value-based sanity checks and shared operations. We
7548 do not signal immediate failures for the register constraints;
7549 this allows a syntax error to take precedence. */
7550 switch (op_parse_code
)
7558 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7559 inst
.error
= BAD_PC
;
7564 if (inst
.operands
[i
].isreg
)
7566 if (inst
.operands
[i
].reg
== REG_PC
)
7567 inst
.error
= BAD_PC
;
7568 else if (inst
.operands
[i
].reg
== REG_SP
7569 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7570 relaxed since ARMv8-A. */
7571 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7574 inst
.error
= BAD_SP
;
7580 if (inst
.operands
[i
].isreg
7581 && inst
.operands
[i
].reg
== REG_PC
7582 && (inst
.operands
[i
].writeback
|| thumb
))
7583 inst
.error
= BAD_PC
;
7587 if (inst
.operands
[i
].isreg
)
7596 case OP_oBARRIER_I15
:
7609 inst
.operands
[i
].imm
= val
;
7614 if (inst
.operands
[i
].reg
!= REG_LR
)
7615 inst
.error
= _("operand must be LR register");
7619 if (inst
.operands
[i
].isreg
7620 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
7621 inst
.error
= BAD_ODD
;
7625 if (inst
.operands
[i
].isreg
)
7627 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
7628 inst
.error
= BAD_EVEN
;
7629 else if (inst
.operands
[i
].reg
== REG_SP
)
7630 as_tsktsk (MVE_BAD_SP
);
7631 else if (inst
.operands
[i
].reg
== REG_PC
)
7632 inst
.error
= BAD_PC
;
7640 /* If we get here, this operand was successfully parsed. */
7641 inst
.operands
[i
].present
= 1;
7645 inst
.error
= BAD_ARGS
;
7650 /* The parse routine should already have set inst.error, but set a
7651 default here just in case. */
7653 inst
.error
= BAD_SYNTAX
;
7657 /* Do not backtrack over a trailing optional argument that
7658 absorbed some text. We will only fail again, with the
7659 'garbage following instruction' error message, which is
7660 probably less helpful than the current one. */
7661 if (backtrack_index
== i
&& backtrack_pos
!= str
7662 && upat
[i
+1] == OP_stop
)
7665 inst
.error
= BAD_SYNTAX
;
7669 /* Try again, skipping the optional argument at backtrack_pos. */
7670 str
= backtrack_pos
;
7671 inst
.error
= backtrack_error
;
7672 inst
.operands
[backtrack_index
].present
= 0;
7673 i
= backtrack_index
;
7677 /* Check that we have parsed all the arguments. */
7678 if (*str
!= '\0' && !inst
.error
)
7679 inst
.error
= _("garbage following instruction");
7681 return inst
.error
? FAIL
: SUCCESS
;
7684 #undef po_char_or_fail
7685 #undef po_reg_or_fail
7686 #undef po_reg_or_goto
7687 #undef po_imm_or_fail
7688 #undef po_scalar_or_fail
7689 #undef po_barrier_or_imm
7691 /* Shorthand macro for instruction encoding functions issuing errors. */
7692 #define constraint(expr, err) \
7703 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7704 instructions are unpredictable if these registers are used. This
7705 is the BadReg predicate in ARM's Thumb-2 documentation.
7707 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7708 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7709 #define reject_bad_reg(reg) \
7711 if (reg == REG_PC) \
7713 inst.error = BAD_PC; \
7716 else if (reg == REG_SP \
7717 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7719 inst.error = BAD_SP; \
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  REG is parenthesized in the expansion so that
   expression arguments bind correctly.  */
#define warn_deprecated_sp(reg)			\
  do						\
    {						\
      if (warn_on_deprecated && (reg) == REG_SP)	\
	as_tsktsk (_("use of r13 is deprecated"));	\
    }						\
  while (0)
7732 /* Functions for operand encoding. ARM, then Thumb. */
7734 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7736 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7738 The only binary encoding difference is the Coprocessor number. Coprocessor
7739 9 is used for half-precision calculations or conversions. The format of the
7740 instruction is the same as the equivalent Coprocessor 10 instruction that
7741 exists for Single-Precision operation. */
7744 do_scalar_fp16_v82_encode (void)
7746 if (inst
.cond
< COND_ALWAYS
)
7747 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7748 " the behaviour is UNPREDICTABLE"));
7749 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7752 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7753 mark_feature_used (&arm_ext_fp16
);
7756 /* If VAL can be encoded in the immediate field of an ARM instruction,
7757 return the encoded form. Otherwise, return FAIL. */
7760 encode_arm_immediate (unsigned int val
)
7767 for (i
= 2; i
< 32; i
+= 2)
7768 if ((a
= rotate_left (val
, i
)) <= 0xff)
7769 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7774 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7775 return the encoded form. Otherwise, return FAIL. */
7777 encode_thumb32_immediate (unsigned int val
)
7784 for (i
= 1; i
<= 24; i
++)
7787 if ((val
& ~(0xff << i
)) == 0)
7788 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7792 if (val
== ((a
<< 16) | a
))
7794 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7798 if (val
== ((a
<< 16) | a
))
7799 return 0x200 | (a
>> 8);
7803 /* Encode a VFP SP or DP register number into inst.instruction. */
7806 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7808 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7811 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7814 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7817 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7822 first_error (_("D register out of range for selected VFP version"));
7830 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7834 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7838 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7842 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7846 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7850 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7858 /* Encode a <shift> in an ARM-format instruction. The immediate,
7859 if any, is handled by md_apply_fix. */
7861 encode_arm_shift (int i
)
7863 /* register-shifted register. */
7864 if (inst
.operands
[i
].immisreg
)
7867 for (op_index
= 0; op_index
<= i
; ++op_index
)
7869 /* Check the operand only when it's presented. In pre-UAL syntax,
7870 if the destination register is the same as the first operand, two
7871 register form of the instruction can be used. */
7872 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7873 && inst
.operands
[op_index
].reg
== REG_PC
)
7874 as_warn (UNPRED_REG ("r15"));
7877 if (inst
.operands
[i
].imm
== REG_PC
)
7878 as_warn (UNPRED_REG ("r15"));
7881 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7882 inst
.instruction
|= SHIFT_ROR
<< 5;
7885 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7886 if (inst
.operands
[i
].immisreg
)
7888 inst
.instruction
|= SHIFT_BY_REG
;
7889 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7892 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7897 encode_arm_shifter_operand (int i
)
7899 if (inst
.operands
[i
].isreg
)
7901 inst
.instruction
|= inst
.operands
[i
].reg
;
7902 encode_arm_shift (i
);
7906 inst
.instruction
|= INST_IMMEDIATE
;
7907 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7908 inst
.instruction
|= inst
.operands
[i
].imm
;
7912 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7914 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7917 Generate an error if the operand is not a register. */
7918 constraint (!inst
.operands
[i
].isreg
,
7919 _("Instruction does not support =N addresses"));
7921 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7923 if (inst
.operands
[i
].preind
)
7927 inst
.error
= _("instruction does not accept preindexed addressing");
7930 inst
.instruction
|= PRE_INDEX
;
7931 if (inst
.operands
[i
].writeback
)
7932 inst
.instruction
|= WRITE_BACK
;
7935 else if (inst
.operands
[i
].postind
)
7937 gas_assert (inst
.operands
[i
].writeback
);
7939 inst
.instruction
|= WRITE_BACK
;
7941 else /* unindexed - only for coprocessor */
7943 inst
.error
= _("instruction does not accept unindexed addressing");
7947 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7948 && (((inst
.instruction
& 0x000f0000) >> 16)
7949 == ((inst
.instruction
& 0x0000f000) >> 12)))
7950 as_warn ((inst
.instruction
& LOAD_BIT
)
7951 ? _("destination register same as write-back base")
7952 : _("source register same as write-back base"));
7955 /* inst.operands[i] was set up by parse_address. Encode it into an
7956 ARM-format mode 2 load or store instruction. If is_t is true,
7957 reject forms that cannot be used with a T instruction (i.e. not
7960 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7962 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7964 encode_arm_addr_mode_common (i
, is_t
);
7966 if (inst
.operands
[i
].immisreg
)
7968 constraint ((inst
.operands
[i
].imm
== REG_PC
7969 || (is_pc
&& inst
.operands
[i
].writeback
)),
7971 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7972 inst
.instruction
|= inst
.operands
[i
].imm
;
7973 if (!inst
.operands
[i
].negative
)
7974 inst
.instruction
|= INDEX_UP
;
7975 if (inst
.operands
[i
].shifted
)
7977 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7978 inst
.instruction
|= SHIFT_ROR
<< 5;
7981 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7982 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7986 else /* immediate offset in inst.relocs[0] */
7988 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7990 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7992 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7993 cannot use PC in addressing.
7994 PC cannot be used in writeback addressing, either. */
7995 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7998 /* Use of PC in str is deprecated for ARMv7. */
7999 if (warn_on_deprecated
8001 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
8002 as_tsktsk (_("use of PC in this instruction is deprecated"));
8005 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8007 /* Prefer + for zero encoded value. */
8008 if (!inst
.operands
[i
].negative
)
8009 inst
.instruction
|= INDEX_UP
;
8010 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
8015 /* inst.operands[i] was set up by parse_address. Encode it into an
8016 ARM-format mode 3 load or store instruction. Reject forms that
8017 cannot be used with such instructions. If is_t is true, reject
8018 forms that cannot be used with a T instruction (i.e. not
8021 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
8023 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
8025 inst
.error
= _("instruction does not accept scaled register index");
8029 encode_arm_addr_mode_common (i
, is_t
);
8031 if (inst
.operands
[i
].immisreg
)
8033 constraint ((inst
.operands
[i
].imm
== REG_PC
8034 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
8036 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
8038 inst
.instruction
|= inst
.operands
[i
].imm
;
8039 if (!inst
.operands
[i
].negative
)
8040 inst
.instruction
|= INDEX_UP
;
8042 else /* immediate offset in inst.relocs[0] */
8044 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
8045 && inst
.operands
[i
].writeback
),
8047 inst
.instruction
|= HWOFFSET_IMM
;
8048 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8050 /* Prefer + for zero encoded value. */
8051 if (!inst
.operands
[i
].negative
)
8052 inst
.instruction
|= INDEX_UP
;
8054 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8059 /* Write immediate bits [7:0] to the following locations:
8061 |28/24|23 19|18 16|15 4|3 0|
8062 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8064 This function is used by VMOV/VMVN/VORR/VBIC. */
8067 neon_write_immbits (unsigned immbits
)
8069 inst
.instruction
|= immbits
& 0xf;
8070 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
8071 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
8074 /* Invert low-order SIZE bits of XHI:XLO. */
8077 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
8079 unsigned immlo
= xlo
? *xlo
: 0;
8080 unsigned immhi
= xhi
? *xhi
: 0;
8085 immlo
= (~immlo
) & 0xff;
8089 immlo
= (~immlo
) & 0xffff;
8093 immhi
= (~immhi
) & 0xffffffff;
8097 immlo
= (~immlo
) & 0xffffffff;
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD, i.e. every
   byte of IMM is either all-zeros (0x00) or all-ones (0xff).  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;
      if (field != 0 && field != 0xff)
	return 0;	/* Mixed zero and one bits within this byte.  */
    }
  return 1;
}
/* For an immediate in which each byte is all-zeros or all-ones,
   return 0bABCD: one bit per byte, taken from bit 0 of bytes
   3, 2, 1, 0 respectively.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1u) << byte;
  return squashed;
}
/* Compress a quarter-float representation to 0b...000 abcdefgh:
   bit a is the sign (bit 31 of IMM), bits bcdefgh are IMM[25:19].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_seven = (imm >> 19) & 0x7f;	/* b c d e f g h.  */
  unsigned sign_bit = (imm >> 24) & 0x80;	/* a (from bit 31).  */

  return sign_bit | low_seven;
}
8140 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8141 the instruction. *OP is passed as the initial value of the op field, and
8142 may be set to a different value depending on the constant (i.e.
8143 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8144 MVN). If the immediate looks like a repeated pattern then also
8145 try smaller element sizes. */
8148 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8149 unsigned *immbits
, int *op
, int size
,
8150 enum neon_el_type type
)
8152 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8154 if (type
== NT_float
&& !float_p
)
8157 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8159 if (size
!= 32 || *op
== 1)
8161 *immbits
= neon_qfloat_bits (immlo
);
8167 if (neon_bits_same_in_bytes (immhi
)
8168 && neon_bits_same_in_bytes (immlo
))
8172 *immbits
= (neon_squash_bits (immhi
) << 4)
8173 | neon_squash_bits (immlo
);
8184 if (immlo
== (immlo
& 0x000000ff))
8189 else if (immlo
== (immlo
& 0x0000ff00))
8191 *immbits
= immlo
>> 8;
8194 else if (immlo
== (immlo
& 0x00ff0000))
8196 *immbits
= immlo
>> 16;
8199 else if (immlo
== (immlo
& 0xff000000))
8201 *immbits
= immlo
>> 24;
8204 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8206 *immbits
= (immlo
>> 8) & 0xff;
8209 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8211 *immbits
= (immlo
>> 16) & 0xff;
8215 if ((immlo
& 0xffff) != (immlo
>> 16))
8222 if (immlo
== (immlo
& 0x000000ff))
8227 else if (immlo
== (immlo
& 0x0000ff00))
8229 *immbits
= immlo
>> 8;
8233 if ((immlo
& 0xff) != (immlo
>> 8))
8238 if (immlo
== (immlo
& 0x000000ff))
8240 /* Don't allow MVN with 8-bit immediate. */
8250 #if defined BFD_HOST_64_BIT
8251 /* Returns TRUE if double precision value V may be cast
8252 to single precision without loss of accuracy. */
8255 is_double_a_single (bfd_int64_t v
)
8257 int exp
= (int)((v
>> 52) & 0x7FF);
8258 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8260 return (exp
== 0 || exp
== 0x7FF
8261 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8262 && (mantissa
& 0x1FFFFFFFl
) == 0;
8265 /* Returns a double precision value casted to single precision
8266 (ignoring the least significant bits in exponent and mantissa). */
8269 double_to_single (bfd_int64_t v
)
8271 int sign
= (int) ((v
>> 63) & 1l);
8272 int exp
= (int) ((v
>> 52) & 0x7FF);
8273 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8279 exp
= exp
- 1023 + 127;
8288 /* No denormalized numbers. */
8294 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8296 #endif /* BFD_HOST_64_BIT */
8305 static void do_vfp_nsyn_opcode (const char *);
8307 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8308 Determine whether it can be performed with a move instruction; if
8309 it can, convert inst.instruction to that move instruction and
8310 return TRUE; if it can't, convert inst.instruction to a literal-pool
8311 load and return FALSE. If this is not a valid thing to do in the
8312 current context, set inst.error and return TRUE.
8314 inst.operands[i] describes the destination register. */
8317 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8320 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8321 bfd_boolean arm_p
= (t
== CONST_ARM
);
8324 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8328 if ((inst
.instruction
& tbit
) == 0)
8330 inst
.error
= _("invalid pseudo operation");
8334 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8335 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8336 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8338 inst
.error
= _("constant expression expected");
8342 if (inst
.relocs
[0].exp
.X_op
== O_constant
8343 || inst
.relocs
[0].exp
.X_op
== O_big
)
8345 #if defined BFD_HOST_64_BIT
8350 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8352 LITTLENUM_TYPE w
[X_PRECISION
];
8355 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8357 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8359 /* FIXME: Should we check words w[2..5] ? */
8364 #if defined BFD_HOST_64_BIT
8366 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8367 << LITTLENUM_NUMBER_OF_BITS
)
8368 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8369 << LITTLENUM_NUMBER_OF_BITS
)
8370 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8371 << LITTLENUM_NUMBER_OF_BITS
)
8372 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8374 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8375 | (l
[0] & LITTLENUM_MASK
);
8379 v
= inst
.relocs
[0].exp
.X_add_number
;
8381 if (!inst
.operands
[i
].issingle
)
8385 /* LDR should not use lead in a flag-setting instruction being
8386 chosen so we do not check whether movs can be used. */
8388 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8389 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8390 && inst
.operands
[i
].reg
!= 13
8391 && inst
.operands
[i
].reg
!= 15)
8393 /* Check if on thumb2 it can be done with a mov.w, mvn or
8394 movw instruction. */
8395 unsigned int newimm
;
8396 bfd_boolean isNegated
;
8398 newimm
= encode_thumb32_immediate (v
);
8399 if (newimm
!= (unsigned int) FAIL
)
8403 newimm
= encode_thumb32_immediate (~v
);
8404 if (newimm
!= (unsigned int) FAIL
)
8408 /* The number can be loaded with a mov.w or mvn
8410 if (newimm
!= (unsigned int) FAIL
8411 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8413 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8414 | (inst
.operands
[i
].reg
<< 8));
8415 /* Change to MOVN. */
8416 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8417 inst
.instruction
|= (newimm
& 0x800) << 15;
8418 inst
.instruction
|= (newimm
& 0x700) << 4;
8419 inst
.instruction
|= (newimm
& 0x0ff);
8422 /* The number can be loaded with a movw instruction. */
8423 else if ((v
& ~0xFFFF) == 0
8424 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8426 int imm
= v
& 0xFFFF;
8428 inst
.instruction
= 0xf2400000; /* MOVW. */
8429 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8430 inst
.instruction
|= (imm
& 0xf000) << 4;
8431 inst
.instruction
|= (imm
& 0x0800) << 15;
8432 inst
.instruction
|= (imm
& 0x0700) << 4;
8433 inst
.instruction
|= (imm
& 0x00ff);
8440 int value
= encode_arm_immediate (v
);
8444 /* This can be done with a mov instruction. */
8445 inst
.instruction
&= LITERAL_MASK
;
8446 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8447 inst
.instruction
|= value
& 0xfff;
8451 value
= encode_arm_immediate (~ v
);
8454 /* This can be done with a mvn instruction. */
8455 inst
.instruction
&= LITERAL_MASK
;
8456 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8457 inst
.instruction
|= value
& 0xfff;
8461 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8464 unsigned immbits
= 0;
8465 unsigned immlo
= inst
.operands
[1].imm
;
8466 unsigned immhi
= inst
.operands
[1].regisimm
8467 ? inst
.operands
[1].reg
8468 : inst
.relocs
[0].exp
.X_unsigned
8470 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8471 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8472 &op
, 64, NT_invtype
);
8476 neon_invert_size (&immlo
, &immhi
, 64);
8478 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8479 &op
, 64, NT_invtype
);
8484 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8490 /* Fill other bits in vmov encoding for both thumb and arm. */
8492 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8494 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8495 neon_write_immbits (immbits
);
8503 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8504 if (inst
.operands
[i
].issingle
8505 && is_quarter_float (inst
.operands
[1].imm
)
8506 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8508 inst
.operands
[1].imm
=
8509 neon_qfloat_bits (v
);
8510 do_vfp_nsyn_opcode ("fconsts");
8514 /* If our host does not support a 64-bit type then we cannot perform
8515 the following optimization. This mean that there will be a
8516 discrepancy between the output produced by an assembler built for
8517 a 32-bit-only host and the output produced from a 64-bit host, but
8518 this cannot be helped. */
8519 #if defined BFD_HOST_64_BIT
8520 else if (!inst
.operands
[1].issingle
8521 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8523 if (is_double_a_single (v
)
8524 && is_quarter_float (double_to_single (v
)))
8526 inst
.operands
[1].imm
=
8527 neon_qfloat_bits (double_to_single (v
));
8528 do_vfp_nsyn_opcode ("fconstd");
8536 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8537 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8540 inst
.operands
[1].reg
= REG_PC
;
8541 inst
.operands
[1].isreg
= 1;
8542 inst
.operands
[1].preind
= 1;
8543 inst
.relocs
[0].pc_rel
= 1;
8544 inst
.relocs
[0].type
= (thumb_p
8545 ? BFD_RELOC_ARM_THUMB_OFFSET
8547 ? BFD_RELOC_ARM_HWLITERAL
8548 : BFD_RELOC_ARM_LITERAL
));
8552 /* inst.operands[i] was set up by parse_address. Encode it into an
8553 ARM-format instruction. Reject all forms which cannot be encoded
8554 into a coprocessor load/store instruction. If wb_ok is false,
8555 reject use of writeback; if unind_ok is false, reject use of
8556 unindexed addressing. If reloc_override is not 0, use it instead
8557 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8558 (in which case it is preserved). */
8561 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8563 if (!inst
.operands
[i
].isreg
)
8566 if (! inst
.operands
[0].isvec
)
8568 inst
.error
= _("invalid co-processor operand");
8571 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8575 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8577 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8579 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8581 gas_assert (!inst
.operands
[i
].writeback
);
8584 inst
.error
= _("instruction does not support unindexed addressing");
8587 inst
.instruction
|= inst
.operands
[i
].imm
;
8588 inst
.instruction
|= INDEX_UP
;
8592 if (inst
.operands
[i
].preind
)
8593 inst
.instruction
|= PRE_INDEX
;
8595 if (inst
.operands
[i
].writeback
)
8597 if (inst
.operands
[i
].reg
== REG_PC
)
8599 inst
.error
= _("pc may not be used with write-back");
8604 inst
.error
= _("instruction does not support writeback");
8607 inst
.instruction
|= WRITE_BACK
;
8611 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8612 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8613 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8614 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8617 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8619 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8622 /* Prefer + for zero encoded value. */
8623 if (!inst
.operands
[i
].negative
)
8624 inst
.instruction
|= INDEX_UP
;
8629 /* Functions for instruction encoding, sorted by sub-architecture.
8630 First some generics; their names are taken from the conventional
8631 bit positions for register arguments in ARM format instructions. */
8641 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8647 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8653 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8654 inst
.instruction
|= inst
.operands
[1].reg
;
8660 inst
.instruction
|= inst
.operands
[0].reg
;
8661 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8667 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8668 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8674 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8675 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8681 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8682 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8686 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8688 if (ARM_CPU_IS_ANY (cpu_variant
))
8690 as_tsktsk ("%s", msg
);
8693 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8705 unsigned Rn
= inst
.operands
[2].reg
;
8706 /* Enforce restrictions on SWP instruction. */
8707 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8709 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8710 _("Rn must not overlap other operands"));
8712 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8714 if (!check_obsolete (&arm_ext_v8
,
8715 _("swp{b} use is obsoleted for ARMv8 and later"))
8716 && warn_on_deprecated
8717 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8718 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8721 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8722 inst
.instruction
|= inst
.operands
[1].reg
;
8723 inst
.instruction
|= Rn
<< 16;
8729 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8730 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8731 inst
.instruction
|= inst
.operands
[2].reg
;
8737 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8738 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8739 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8740 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8742 inst
.instruction
|= inst
.operands
[0].reg
;
8743 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8744 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8750 inst
.instruction
|= inst
.operands
[0].imm
;
8756 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8757 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8760 /* ARM instructions, in alphabetical order by function name (except
8761 that wrapper functions appear immediately after the function they
8764 /* This is a pseudo-op of the form "adr rd, label" to be converted
8765 into a relative address of the form "add rd, pc, #label-.-8". */
8770 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8772 /* Frag hacking will turn this into a sub instruction if the offset turns
8773 out to be negative. */
8774 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8775 inst
.relocs
[0].pc_rel
= 1;
8776 inst
.relocs
[0].exp
.X_add_number
-= 8;
8778 if (support_interwork
8779 && inst
.relocs
[0].exp
.X_op
== O_symbol
8780 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8781 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8782 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8783 inst
.relocs
[0].exp
.X_add_number
|= 1;
8786 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8787 into a relative address of the form:
8788 add rd, pc, #low(label-.-8)"
8789 add rd, rd, #high(label-.-8)" */
8794 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8796 /* Frag hacking will turn this into a sub instruction if the offset turns
8797 out to be negative. */
8798 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8799 inst
.relocs
[0].pc_rel
= 1;
8800 inst
.size
= INSN_SIZE
* 2;
8801 inst
.relocs
[0].exp
.X_add_number
-= 8;
8803 if (support_interwork
8804 && inst
.relocs
[0].exp
.X_op
== O_symbol
8805 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8806 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8807 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8808 inst
.relocs
[0].exp
.X_add_number
|= 1;
8814 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8815 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8817 if (!inst
.operands
[1].present
)
8818 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8819 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8820 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8821 encode_arm_shifter_operand (2);
8827 if (inst
.operands
[0].present
)
8828 inst
.instruction
|= inst
.operands
[0].imm
;
8830 inst
.instruction
|= 0xf;
8836 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8837 constraint (msb
> 32, _("bit-field extends past end of register"));
8838 /* The instruction encoding stores the LSB and MSB,
8839 not the LSB and width. */
8840 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8841 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8842 inst
.instruction
|= (msb
- 1) << 16;
8850 /* #0 in second position is alternative syntax for bfc, which is
8851 the same instruction but with REG_PC in the Rm field. */
8852 if (!inst
.operands
[1].isreg
)
8853 inst
.operands
[1].reg
= REG_PC
;
8855 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8856 constraint (msb
> 32, _("bit-field extends past end of register"));
8857 /* The instruction encoding stores the LSB and MSB,
8858 not the LSB and width. */
8859 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8860 inst
.instruction
|= inst
.operands
[1].reg
;
8861 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8862 inst
.instruction
|= (msb
- 1) << 16;
8868 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8869 _("bit-field extends past end of register"));
8870 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8871 inst
.instruction
|= inst
.operands
[1].reg
;
8872 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8873 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8876 /* ARM V5 breakpoint instruction (argument parse)
8877 BKPT <16 bit unsigned immediate>
8878 Instruction is not conditional.
8879 The bit pattern given in insns[] has the COND_ALWAYS condition,
8880 and it is an error if the caller tried to override that. */
8885 /* Top 12 of 16 bits to bits 19:8. */
8886 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8888 /* Bottom 4 of 16 bits to bits 3:0. */
8889 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8893 encode_branch (int default_reloc
)
8895 if (inst
.operands
[0].hasreloc
)
8897 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8898 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8899 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8900 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8901 ? BFD_RELOC_ARM_PLT32
8902 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8905 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8906 inst
.relocs
[0].pc_rel
= 1;
8913 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8914 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8917 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8924 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8926 if (inst
.cond
== COND_ALWAYS
)
8927 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8929 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8933 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8936 /* ARM V5 branch-link-exchange instruction (argument parse)
8937 BLX <target_addr> ie BLX(1)
8938 BLX{<condition>} <Rm> ie BLX(2)
8939 Unfortunately, there are two different opcodes for this mnemonic.
8940 So, the insns[].value is not used, and the code here zaps values
8941 into inst.instruction.
8942 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8947 if (inst
.operands
[0].isreg
)
8949 /* Arg is a register; the opcode provided by insns[] is correct.
8950 It is not illegal to do "blx pc", just useless. */
8951 if (inst
.operands
[0].reg
== REG_PC
)
8952 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8954 inst
.instruction
|= inst
.operands
[0].reg
;
8958 /* Arg is an address; this instruction cannot be executed
8959 conditionally, and the opcode must be adjusted.
8960 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8961 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8962 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8963 inst
.instruction
= 0xfa000000;
8964 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8971 bfd_boolean want_reloc
;
8973 if (inst
.operands
[0].reg
== REG_PC
)
8974 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8976 inst
.instruction
|= inst
.operands
[0].reg
;
8977 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8978 it is for ARMv4t or earlier. */
8979 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8980 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8981 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8985 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8990 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8994 /* ARM v5TEJ. Jump to Jazelle code. */
8999 if (inst
.operands
[0].reg
== REG_PC
)
9000 as_tsktsk (_("use of r15 in bxj is not really useful"));
9002 inst
.instruction
|= inst
.operands
[0].reg
;
9005 /* Co-processor data operation:
9006 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9007 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9011 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9012 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
9013 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9014 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9015 inst
.instruction
|= inst
.operands
[4].reg
;
9016 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9022 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9023 encode_arm_shifter_operand (1);
9026 /* Transfer between coprocessor and ARM registers.
9027 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9032 No special properties. */
9034 struct deprecated_coproc_regs_s
9041 arm_feature_set deprecated
;
9042 arm_feature_set obsoleted
;
9043 const char *dep_msg
;
9044 const char *obs_msg
;
9047 #define DEPR_ACCESS_V8 \
9048 N_("This coprocessor register access is deprecated in ARMv8")
9050 /* Table of all deprecated coprocessor registers. */
9051 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9053 {15, 0, 7, 10, 5, /* CP15DMB. */
9054 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9055 DEPR_ACCESS_V8
, NULL
},
9056 {15, 0, 7, 10, 4, /* CP15DSB. */
9057 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9058 DEPR_ACCESS_V8
, NULL
},
9059 {15, 0, 7, 5, 4, /* CP15ISB. */
9060 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9061 DEPR_ACCESS_V8
, NULL
},
9062 {14, 6, 1, 0, 0, /* TEEHBR. */
9063 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9064 DEPR_ACCESS_V8
, NULL
},
9065 {14, 6, 0, 0, 0, /* TEECR. */
9066 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9067 DEPR_ACCESS_V8
, NULL
},
9070 #undef DEPR_ACCESS_V8
9072 static const size_t deprecated_coproc_reg_count
=
9073 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9081 Rd
= inst
.operands
[2].reg
;
9084 if (inst
.instruction
== 0xee000010
9085 || inst
.instruction
== 0xfe000010)
9087 reject_bad_reg (Rd
);
9088 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9090 constraint (Rd
== REG_SP
, BAD_SP
);
9095 if (inst
.instruction
== 0xe000010)
9096 constraint (Rd
== REG_PC
, BAD_PC
);
9099 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9101 const struct deprecated_coproc_regs_s
*r
=
9102 deprecated_coproc_regs
+ i
;
9104 if (inst
.operands
[0].reg
== r
->cp
9105 && inst
.operands
[1].imm
== r
->opc1
9106 && inst
.operands
[3].reg
== r
->crn
9107 && inst
.operands
[4].reg
== r
->crm
9108 && inst
.operands
[5].imm
== r
->opc2
)
9110 if (! ARM_CPU_IS_ANY (cpu_variant
)
9111 && warn_on_deprecated
9112 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9113 as_tsktsk ("%s", r
->dep_msg
);
9117 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9118 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9119 inst
.instruction
|= Rd
<< 12;
9120 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9121 inst
.instruction
|= inst
.operands
[4].reg
;
9122 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9125 /* Transfer between coprocessor register and pair of ARM registers.
9126 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9131 Two XScale instructions are special cases of these:
9133 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9134 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9136 Result unpredictable if Rd or Rn is R15. */
9143 Rd
= inst
.operands
[2].reg
;
9144 Rn
= inst
.operands
[3].reg
;
9148 reject_bad_reg (Rd
);
9149 reject_bad_reg (Rn
);
9153 constraint (Rd
== REG_PC
, BAD_PC
);
9154 constraint (Rn
== REG_PC
, BAD_PC
);
9157 /* Only check the MRRC{2} variants. */
9158 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9160 /* If Rd == Rn, error that the operation is
9161 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9162 constraint (Rd
== Rn
, BAD_OVERLAP
);
9165 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9166 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9167 inst
.instruction
|= Rd
<< 12;
9168 inst
.instruction
|= Rn
<< 16;
9169 inst
.instruction
|= inst
.operands
[4].reg
;
9175 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9176 if (inst
.operands
[1].present
)
9178 inst
.instruction
|= CPSI_MMOD
;
9179 inst
.instruction
|= inst
.operands
[1].imm
;
9186 inst
.instruction
|= inst
.operands
[0].imm
;
9192 unsigned Rd
, Rn
, Rm
;
9194 Rd
= inst
.operands
[0].reg
;
9195 Rn
= (inst
.operands
[1].present
9196 ? inst
.operands
[1].reg
: Rd
);
9197 Rm
= inst
.operands
[2].reg
;
9199 constraint ((Rd
== REG_PC
), BAD_PC
);
9200 constraint ((Rn
== REG_PC
), BAD_PC
);
9201 constraint ((Rm
== REG_PC
), BAD_PC
);
9203 inst
.instruction
|= Rd
<< 16;
9204 inst
.instruction
|= Rn
<< 0;
9205 inst
.instruction
|= Rm
<< 8;
9211 /* There is no IT instruction in ARM mode. We
9212 process it to do the validation as if in
9213 thumb mode, just in case the code gets
9214 assembled for thumb using the unified syntax. */
9219 set_pred_insn_type (IT_INSN
);
9220 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9221 now_pred
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int reg = ffs (range) - 1;

  if (reg > 15 || range != (1 << reg))
    return -1;
  return reg;
}
9235 encode_ldmstm(int from_push_pop_mnem
)
9237 int base_reg
= inst
.operands
[0].reg
;
9238 int range
= inst
.operands
[1].imm
;
9241 inst
.instruction
|= base_reg
<< 16;
9242 inst
.instruction
|= range
;
9244 if (inst
.operands
[1].writeback
)
9245 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9247 if (inst
.operands
[0].writeback
)
9249 inst
.instruction
|= WRITE_BACK
;
9250 /* Check for unpredictable uses of writeback. */
9251 if (inst
.instruction
& LOAD_BIT
)
9253 /* Not allowed in LDM type 2. */
9254 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9255 && ((range
& (1 << REG_PC
)) == 0))
9256 as_warn (_("writeback of base register is UNPREDICTABLE"));
9257 /* Only allowed if base reg not in list for other types. */
9258 else if (range
& (1 << base_reg
))
9259 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9263 /* Not allowed for type 2. */
9264 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9265 as_warn (_("writeback of base register is UNPREDICTABLE"));
9266 /* Only allowed if base reg not in list, or first in list. */
9267 else if ((range
& (1 << base_reg
))
9268 && (range
& ((1 << base_reg
) - 1)))
9269 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9273 /* If PUSH/POP has only one register, then use the A2 encoding. */
9274 one_reg
= only_one_reg_in_list (range
);
9275 if (from_push_pop_mnem
&& one_reg
>= 0)
9277 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9279 if (is_push
&& one_reg
== 13 /* SP */)
9280 /* PR 22483: The A2 encoding cannot be used when
9281 pushing the stack pointer as this is UNPREDICTABLE. */
9284 inst
.instruction
&= A_COND_MASK
;
9285 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9286 inst
.instruction
|= one_reg
<< 12;
9293 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9296 /* ARMv5TE load-consecutive (argument parse)
9305 constraint (inst
.operands
[0].reg
% 2 != 0,
9306 _("first transfer register must be even"));
9307 constraint (inst
.operands
[1].present
9308 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9309 _("can only transfer two consecutive registers"));
9310 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9311 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9313 if (!inst
.operands
[1].present
)
9314 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9316 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9317 register and the first register written; we have to diagnose
9318 overlap between the base and the second register written here. */
9320 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9321 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9322 as_warn (_("base register written back, and overlaps "
9323 "second transfer register"));
9325 if (!(inst
.instruction
& V4_STR_BIT
))
9327 /* For an index-register load, the index register must not overlap the
9328 destination (even if not write-back). */
9329 if (inst
.operands
[2].immisreg
9330 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9331 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9332 as_warn (_("index register overlaps transfer register"));
9334 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9335 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9341 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9342 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9343 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9344 || inst
.operands
[1].negative
9345 /* This can arise if the programmer has written
9347 or if they have mistakenly used a register name as the last
9350 It is very difficult to distinguish between these two cases
9351 because "rX" might actually be a label. ie the register
9352 name has been occluded by a symbol of the same name. So we
9353 just generate a general 'bad addressing mode' type error
9354 message and leave it up to the programmer to discover the
9355 true cause and fix their mistake. */
9356 || (inst
.operands
[1].reg
== REG_PC
),
9359 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9360 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9361 _("offset must be zero in ARM encoding"));
9363 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9365 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9366 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9367 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9373 constraint (inst
.operands
[0].reg
% 2 != 0,
9374 _("even register required"));
9375 constraint (inst
.operands
[1].present
9376 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9377 _("can only load two consecutive registers"));
9378 /* If op 1 were present and equal to PC, this function wouldn't
9379 have been called in the first place. */
9380 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9382 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9383 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9386 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9387 which is not a multiple of four is UNPREDICTABLE. */
9389 check_ldr_r15_aligned (void)
9391 constraint (!(inst
.operands
[1].immisreg
)
9392 && (inst
.operands
[0].reg
== REG_PC
9393 && inst
.operands
[1].reg
== REG_PC
9394 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9395 _("ldr to register 15 must be 4-byte aligned"));
9401 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9402 if (!inst
.operands
[1].isreg
)
9403 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9405 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9406 check_ldr_r15_aligned ();
9412 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9414 if (inst
.operands
[1].preind
)
9416 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9417 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9418 _("this instruction requires a post-indexed address"));
9420 inst
.operands
[1].preind
= 0;
9421 inst
.operands
[1].postind
= 1;
9422 inst
.operands
[1].writeback
= 1;
9424 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9425 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9428 /* Halfword and signed-byte load/store operations. */
9433 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9434 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9435 if (!inst
.operands
[1].isreg
)
9436 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9438 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9444 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9446 if (inst
.operands
[1].preind
)
9448 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9449 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9450 _("this instruction requires a post-indexed address"));
9452 inst
.operands
[1].preind
= 0;
9453 inst
.operands
[1].postind
= 1;
9454 inst
.operands
[1].writeback
= 1;
9456 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9457 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9460 /* Co-processor register load/store.
9461 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9465 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9466 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9467 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9473 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9474 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9475 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9476 && !(inst
.instruction
& 0x00400000))
9477 as_tsktsk (_("Rd and Rm should be different in mla"));
9479 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9480 inst
.instruction
|= inst
.operands
[1].reg
;
9481 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9482 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9488 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9489 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9491 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9492 encode_arm_shifter_operand (1);
9495 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9502 top
= (inst
.instruction
& 0x00400000) != 0;
9503 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9504 _(":lower16: not allowed in this instruction"));
9505 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9506 _(":upper16: not allowed in this instruction"));
9507 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9508 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9510 imm
= inst
.relocs
[0].exp
.X_add_number
;
9511 /* The value is in two pieces: 0:11, 16:19. */
9512 inst
.instruction
|= (imm
& 0x00000fff);
9513 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9518 do_vfp_nsyn_mrs (void)
9520 if (inst
.operands
[0].isvec
)
9522 if (inst
.operands
[1].reg
!= 1)
9523 first_error (_("operand 1 must be FPSCR"));
9524 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9525 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9526 do_vfp_nsyn_opcode ("fmstat");
9528 else if (inst
.operands
[1].isvec
)
9529 do_vfp_nsyn_opcode ("fmrx");
9537 do_vfp_nsyn_msr (void)
9539 if (inst
.operands
[0].isvec
)
9540 do_vfp_nsyn_opcode ("fmxr");
9550 unsigned Rt
= inst
.operands
[0].reg
;
9552 if (thumb_mode
&& Rt
== REG_SP
)
9554 inst
.error
= BAD_SP
;
9558 /* MVFR2 is only valid at ARMv8-A. */
9559 if (inst
.operands
[1].reg
== 5)
9560 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9563 /* APSR_ sets isvec. All other refs to PC are illegal. */
9564 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9566 inst
.error
= BAD_PC
;
9570 /* If we get through parsing the register name, we just insert the number
9571 generated into the instruction without further validation. */
9572 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9573 inst
.instruction
|= (Rt
<< 12);
9579 unsigned Rt
= inst
.operands
[1].reg
;
9582 reject_bad_reg (Rt
);
9583 else if (Rt
== REG_PC
)
9585 inst
.error
= BAD_PC
;
9589 /* MVFR2 is only valid for ARMv8-A. */
9590 if (inst
.operands
[0].reg
== 5)
9591 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9594 /* If we get through parsing the register name, we just insert the number
9595 generated into the instruction without further validation. */
9596 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9597 inst
.instruction
|= (Rt
<< 12);
9605 if (do_vfp_nsyn_mrs () == SUCCESS
)
9608 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9609 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9611 if (inst
.operands
[1].isreg
)
9613 br
= inst
.operands
[1].reg
;
9614 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9615 as_bad (_("bad register for mrs"));
9619 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9620 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9622 _("'APSR', 'CPSR' or 'SPSR' expected"));
9623 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9626 inst
.instruction
|= br
;
9629 /* Two possible forms:
9630 "{C|S}PSR_<field>, Rm",
9631 "{C|S}PSR_f, #expression". */
9636 if (do_vfp_nsyn_msr () == SUCCESS
)
9639 inst
.instruction
|= inst
.operands
[0].imm
;
9640 if (inst
.operands
[1].isreg
)
9641 inst
.instruction
|= inst
.operands
[1].reg
;
9644 inst
.instruction
|= INST_IMMEDIATE
;
9645 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9646 inst
.relocs
[0].pc_rel
= 0;
9653 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9655 if (!inst
.operands
[2].present
)
9656 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9657 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9658 inst
.instruction
|= inst
.operands
[1].reg
;
9659 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9661 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9662 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9663 as_tsktsk (_("Rd and Rm should be different in mul"));
9666 /* Long Multiply Parser
9667 UMULL RdLo, RdHi, Rm, Rs
9668 SMULL RdLo, RdHi, Rm, Rs
9669 UMLAL RdLo, RdHi, Rm, Rs
9670 SMLAL RdLo, RdHi, Rm, Rs. */
9675 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9676 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9677 inst
.instruction
|= inst
.operands
[2].reg
;
9678 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9680 /* rdhi and rdlo must be different. */
9681 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9682 as_tsktsk (_("rdhi and rdlo must be different"));
9684 /* rdhi, rdlo and rm must all be different before armv6. */
9685 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9686 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9687 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9688 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9694 if (inst
.operands
[0].present
9695 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9697 /* Architectural NOP hints are CPSR sets with no bits selected. */
9698 inst
.instruction
&= 0xf0000000;
9699 inst
.instruction
|= 0x0320f000;
9700 if (inst
.operands
[0].present
)
9701 inst
.instruction
|= inst
.operands
[0].imm
;
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

/* NOTE(review): function headers/braces lost in extraction; two encoders
   (do_pkhbt and do_pkhtb) follow, tokens preserved verbatim.  */

/* do_pkhbt: Rd in bits 12-15, Rn in 16-19, Rm in 0-3.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
if (inst.operands[3].present)
  encode_arm_shift (3);   /* optional LSL #imm on Rm */

/* ARM V6 PKHTB (Argument Parse).  */

/* do_pkhtb body.  */
if (!inst.operands[3].present)
    /* If the shift specifier is omitted, turn the instruction
       into pkhbt rd, rm, rn.  */
    inst.instruction &= 0xfff00010;  /* clear shift/opcode bits -> PKHBT form */
    inst.instruction |= inst.operands[0].reg << 12;
    inst.instruction |= inst.operands[1].reg;        /* Rn goes in Rm slot */
    inst.instruction |= inst.operands[2].reg << 16;  /* Rm goes in Rn slot */
/* NOTE(review): an 'else' for the shifted case appears lost here.  */
    inst.instruction |= inst.operands[0].reg << 12;
    inst.instruction |= inst.operands[1].reg << 16;
    inst.instruction |= inst.operands[2].reg;
    encode_arm_shift (3);   /* mandatory ASR #imm for PKHTB */
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write
   Syntactically, like LDR with B=1, W=0, L=1.  */

/* NOTE(review): headers/braces lost; do_pld followed by do_pli.  */

/* do_pld: PLD takes only an immediate-offset or register-offset
   pre-indexed address — reject everything else.  */
constraint (!inst.operands[0].isreg,
	    _("'[' expected after PLD mnemonic"));
constraint (inst.operands[0].postind,
	    _("post-indexed expression used in preload instruction"));
constraint (inst.operands[0].writeback,
	    _("writeback used in preload instruction"));
constraint (!inst.operands[0].preind,
	    _("unindexed addressing used in preload instruction"));
encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);

/* ARMv7: PLI <addr_mode>  */

/* do_pli: same address-form restrictions as PLD.  */
constraint (!inst.operands[0].isreg,
	    _("'[' expected after PLI mnemonic"));
constraint (inst.operands[0].postind,
	    _("post-indexed expression used in preload instruction"));
constraint (inst.operands[0].writeback,
	    _("writeback used in preload instruction"));
constraint (!inst.operands[0].preind,
	    _("unindexed addressing used in preload instruction"));
encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
/* PLI is encoded with the P (pre-index) bit clear.  */
inst.instruction &= ~PRE_INDEX;
/* NOTE(review): headers/braces lost; do_push_pop followed by do_rfe.  */

/* do_push_pop: rewrite "push {regs}" / "pop {regs}" as the equivalent
   LDM/STM with SP as a writeback base register.  */
constraint (inst.operands[0].writeback,
	    _("push/pop do not support {reglist}^"));
/* Move the register list to operand 1 and synthesise operand 0 = SP!.  */
inst.operands[1] = inst.operands[0];
memset (&inst.operands[0], 0, sizeof inst.operands[0]);
inst.operands[0].isreg = 1;
inst.operands[0].writeback = 1;
inst.operands[0].reg = REG_SP;
encode_ldmstm (/*from_push_pop_mnem=*/TRUE);

/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   Unconditionally executed.
   Error if Rn is R15.  */

/* do_rfe: base register in bits 16-19, optional writeback.  */
inst.instruction |= inst.operands[0].reg << 16;
if (inst.operands[0].writeback)
  inst.instruction |= WRITE_BACK;
/* ARM V6 ssat (argument parse).  */

/* NOTE(review): headers/braces lost; four saturate encoders follow:
   do_ssat, do_usat, do_ssat16, do_usat16 — tokens preserved.  */

/* do_ssat: Rd bits 12-15, saturate position (1..32, encoded minus one)
   bits 16-20, Rn bits 0-3, optional shift.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= (inst.operands[1].imm - 1) << 16;
inst.instruction |= inst.operands[2].reg;

if (inst.operands[3].present)
  encode_arm_shift (3);

/* ARM V6 usat (argument parse).  */

/* do_usat: like SSAT but the position is 0..31, encoded directly.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].imm << 16;
inst.instruction |= inst.operands[2].reg;

if (inst.operands[3].present)
  encode_arm_shift (3);

/* ARM V6 ssat16 (argument parse).  */

/* do_ssat16: halfword variant, position 1..16 encoded minus one.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= ((inst.operands[1].imm - 1) << 16);
inst.instruction |= inst.operands[2].reg;

/* do_usat16 (its comment/header lost): position encoded directly.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].imm << 16;
inst.instruction |= inst.operands[2].reg;
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.
   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE (comment truncated in extraction).  */

/* NOTE(review): headers/braces lost; do_setend followed by do_shift.  */

/* do_setend: deprecated from ARMv8 onwards; imm != 0 selects BE.  */
if (warn_on_deprecated
    && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
  as_tsktsk (_("setend use is deprecated for ARMv8"));

if (inst.operands[0].imm)
  inst.instruction |= 0x200;   /* the E bit in the SETEND encoding */

/* do_shift: MOV-shift forms, "shift Rd, {Rm,} #imm|Rs".  When operand 1
   is absent Rm defaults to Rd.  */
unsigned int Rm = (inst.operands[1].present
		   ? inst.operands[1].reg
		   : inst.operands[0].reg);

inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= Rm;
if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    inst.instruction |= inst.operands[2].reg << 8;
    inst.instruction |= SHIFT_BY_REG;
    /* PR 12854: Error on extraneous shifts.  */
    constraint (inst.operands[2].shifted,
		_("extraneous shift as part of operand to shift insn"));
/* NOTE(review): an 'else' for the immediate-shift case appears lost.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
/* NOTE(review): headers/braces lost; five tiny encoders follow:
   do_smc, do_hvc, do_swi, do_setpan, do_t_setpan.  */

/* do_smc: Secure Monitor Call — the immediate is fixed up by reloc.  */
inst.relocs[0].type = BFD_RELOC_ARM_SMC;
inst.relocs[0].pc_rel = 0;

/* do_hvc: Hypervisor Call.  */
inst.relocs[0].type = BFD_RELOC_ARM_HVC;
inst.relocs[0].pc_rel = 0;

/* do_swi: software interrupt / SVC.  */
inst.relocs[0].type = BFD_RELOC_ARM_SWI;
inst.relocs[0].pc_rel = 0;

/* do_setpan (ARM encoding): the PAN state bit goes in bit 9.  */
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	    _("selected processor does not support SETPAN instruction"));

inst.instruction |= ((inst.operands[0].imm & 1) << 9);

/* do_t_setpan (Thumb encoding): the PAN state bit goes in bit 3.  */
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	    _("selected processor does not support SETPAN instruction"));

inst.instruction |= (inst.operands[0].imm << 3);
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

/* NOTE(review): headers/braces lost; do_smla, do_smlal, do_smul.  */

/* do_smla: Rd bits 16-19, Rm bits 0-3, Rs bits 8-11, Rn bits 12-15.  */
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 8;
inst.instruction |= inst.operands[3].reg << 12;

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

/* do_smlal.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.instruction |= inst.operands[3].reg << 8;

if (inst.operands[0].reg == inst.operands[1].reg)
  as_tsktsk (_("rdhi and rdlo must be different"));

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

/* do_smul.  */
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 8;
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

/* NOTE(review): header, local declaration of 'reg' and the else-branch
   defaulting reg to REG_SP appear lost in this extraction.  */

/* If a base register was written it must be r13.  */
if (inst.operands[0].present)
    reg = inst.operands[0].reg;
    constraint (reg != REG_SP, _("SRS base register must be r13"));

inst.instruction |= reg << 16;          /* base register */
inst.instruction |= inst.operands[1].imm;   /* target mode number */
if (inst.operands[0].writeback || inst.operands[1].writeback)
  inst.instruction |= WRITE_BACK;
/* ARM V6 strex (argument parse).  */

/* NOTE(review): headers/braces and a few constraint-message lines were
   lost in this extraction; five related encoders follow: do_strex,
   do_t_strexbh, do_strexd, and two store-exclusive/release overlap
   checkers (presumably do_stlex / do_t_stlex — confirm against the
   original file).  */

/* do_strex: the address must be a plain [Rn] with zero offset.  */
constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	    || inst.operands[2].postind || inst.operands[2].writeback
	    || inst.operands[2].immisreg || inst.operands[2].shifted
	    || inst.operands[2].negative
	    /* See comment in do_ldrex().  */
	    || (inst.operands[2].reg == REG_PC),
	    /* NOTE(review): error-message argument lost here.  */

/* Rd must not overlap Rt or Rn (UNPREDICTABLE otherwise).  */
constraint (inst.operands[0].reg == inst.operands[1].reg
	    || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

constraint (inst.relocs[0].exp.X_op != O_constant
	    || inst.relocs[0].exp.X_add_number != 0,
	    _("offset must be zero in ARM encoding"));

inst.instruction |= inst.operands[0].reg << 12;  /* Rd (status) */
inst.instruction |= inst.operands[1].reg;        /* Rt (source)  */
inst.instruction |= inst.operands[2].reg << 16;  /* Rn (address) */
inst.relocs[0].type = BFD_RELOC_UNUSED;

/* do_t_strexbh: Thumb STREXB/STREXH — same addressing restrictions.  */
do_t_strexbh (void)
constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	    || inst.operands[2].postind || inst.operands[2].writeback
	    || inst.operands[2].immisreg || inst.operands[2].shifted
	    || inst.operands[2].negative,
	    /* NOTE(review): error-message argument lost here.  */

constraint (inst.operands[0].reg == inst.operands[1].reg
	    || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
/* NOTE(review): trailing call (presumably do_rm_rd_rn ()) lost.  */

/* do_strexd: STREXD needs an even Rt with Rt2 == Rt+1, and no overlap
   of Rd with Rt, Rt+1 or Rn.  */
constraint (inst.operands[1].reg % 2 != 0,
	    _("even register required"));
constraint (inst.operands[2].present
	    && inst.operands[2].reg != inst.operands[1].reg + 1,
	    _("can only store two consecutive registers"));
/* If op 2 were present and equal to PC, this function wouldn't
   have been called in the first place.  */
constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

constraint (inst.operands[0].reg == inst.operands[1].reg
	    || inst.operands[0].reg == inst.operands[1].reg + 1
	    || inst.operands[0].reg == inst.operands[3].reg,
	    /* NOTE(review): error-message argument lost here.  */

inst.instruction |= inst.operands[0].reg << 12;  /* Rd */
inst.instruction |= inst.operands[1].reg;        /* Rt */
inst.instruction |= inst.operands[3].reg << 16;  /* Rn */

/* Overlap check for a store-exclusive/release variant (ARM form);
   the trailing encode call appears lost.  */
constraint (inst.operands[0].reg == inst.operands[1].reg
	    || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

/* Overlap check for the Thumb variant; trailing encode call lost.  */
constraint (inst.operands[0].reg == inst.operands[1].reg
	    || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

/* NOTE(review): headers/braces lost; do_sxtah then do_sxth.  */

/* do_sxtah: Rd bits 12-15, Rn bits 16-19, Rm bits 0-3,
   rotation in bits 10-11.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.instruction |= inst.operands[3].imm << 10;

/* SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

/* do_sxth: two-operand form, rotation in bits 10-11.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].imm << 10;
10106 /* VFP instructions. In a logical order: SP variant first, monad
10107 before dyad, arithmetic then move then load/store. */
10110 do_vfp_sp_monadic (void)
10112 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10113 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10117 do_vfp_sp_dyadic (void)
10119 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10120 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10121 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
/* NOTE(review): 'static void' headers and braces lost in extraction;
   a run of small VFP register-move / convert / load-store encoders
   follows, tokens preserved.  */

/* Compare single-precision register against zero: only Sd needed.  */
do_vfp_sp_compare_z (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

/* Convert single to double: Dd <- Sm.  */
do_vfp_dp_sp_cvt (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);

/* Convert double to single: Sd <- Dm.  */
do_vfp_sp_dp_cvt (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);

/* Move VFP single to core register: Rd in bits 12-15, Sn encoded.  */
do_vfp_reg_from_sp (void)
inst.instruction |= inst.operands[0].reg << 12;
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);

/* Move a pair of VFP singles to two core registers; the pair must be
   exactly two consecutive SP registers.  */
do_vfp_reg2_from_sp2 (void)
constraint (inst.operands[2].imm != 2,
	    _("only two consecutive VFP SP registers allowed here"));
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);

/* Move core register to VFP single.  */
do_vfp_sp_from_reg (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
inst.instruction |= inst.operands[1].reg << 12;

/* Move two core registers to a consecutive VFP single pair.  */
do_vfp_sp2_from_reg2 (void)
constraint (inst.operands[0].imm != 2,
	    _("only two consecutive VFP SP registers allowed here"));
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;

/* Single-precision load/store: Sd plus a coprocessor address.  */
do_vfp_sp_ldst (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
encode_arm_cp_address (1, FALSE, TRUE, 0);

/* Double-precision load/store: Dd plus a coprocessor address.  */
do_vfp_dp_ldst (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
encode_arm_cp_address (1, FALSE, TRUE, 0);
/* NOTE(review): headers/braces and some 'else'/increment lines lost;
   the two ldstm workers and their six thin wrappers follow.  */

/* vfp_sp_ldstm: common encoder for single-precision FLDM/FSTM.
   Writeback is required for any mode other than IA-without-writeback.  */
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
if (inst.operands[0].writeback)
  inst.instruction |= WRITE_BACK;
/* NOTE(review): 'else' lost — the constraint applies when there is
   no writeback.  */
  constraint (ldstm_type != VFP_LDSTMIA,
	      _("this addressing mode requires base-register writeback"));
inst.instruction |= inst.operands[0].reg << 16;   /* base register */
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
inst.instruction |= inst.operands[1].imm;         /* register count */

/* vfp_dp_ldstm: double-precision variant; the offset field counts
   words, so the register count is doubled (and, per the original,
   incremented for the FLDMX/FSTMX forms — that line was lost here).  */
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
if (inst.operands[0].writeback)
  inst.instruction |= WRITE_BACK;
/* NOTE(review): 'else' lost, as above.  */
  constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
	      _("this addressing mode requires base-register writeback"));

inst.instruction |= inst.operands[0].reg << 16;
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

count = inst.operands[1].imm << 1;
if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
  /* NOTE(review): 'count += 1;' appears lost here.  */

inst.instruction |= count;

/* Thin wrappers selecting the addressing mode.  */
do_vfp_sp_ldstmia (void)
vfp_sp_ldstm (VFP_LDSTMIA);

do_vfp_sp_ldstmdb (void)
vfp_sp_ldstm (VFP_LDSTMDB);

do_vfp_dp_ldstmia (void)
vfp_dp_ldstm (VFP_LDSTMIA);

do_vfp_dp_ldstmdb (void)
vfp_dp_ldstm (VFP_LDSTMDB);

do_vfp_xp_ldstmia (void)
vfp_dp_ldstm (VFP_LDSTMIAX);

do_vfp_xp_ldstmdb (void)
vfp_dp_ldstm (VFP_LDSTMDBX);
/* NOTE(review): headers/braces lost; double-precision register-field
   encoders, named for the operand order they encode.  */

/* Dd <- op Dm.  */
do_vfp_dp_rd_rm (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);

/* Operand 0 into the Dn field, operand 1 into Dd.  */
do_vfp_dp_rn_rd (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

/* Operand 0 into Dd, operand 1 into Dn.  */
do_vfp_dp_rd_rn (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);

/* Full dyadic form: Dd, Dn, Dm.  */
do_vfp_dp_rd_rn_rm (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);

/* Single destination register only.  */
do_vfp_dp_rd (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);

/* Operand 0 into Dm, operand 1 into Dd, operand 2 into Dn.  */
do_vfp_dp_rm_rd_rn (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
/* VFPv3 instructions.  */

/* NOTE(review): headers/braces, 'return's and the trailing
   'vfp_conv (16/32)' calls of the four wrappers were lost in this
   extraction; tokens preserved.  */

/* fconst: immediate split into high nibble (bits 16-19) and
   low nibble (bits 0-3), single-precision destination.  */
do_vfp_sp_const (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
inst.instruction |= (inst.operands[1].imm & 0x0f);

/* Double-precision fconst, same immediate split.  */
do_vfp_dp_const (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
inst.instruction |= (inst.operands[1].imm & 0x0f);

/* vfp_conv: encode the fraction-bits immediate of a fixed-point
   conversion.  srcsize is the source width (16 or 32); the encoded
   value is srcsize - imm, stored split across bit 5 and bits 0-3.  */
vfp_conv (int srcsize)
int immbits = srcsize - inst.operands[1].imm;

if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
       i.e. immbits must be in range 0 - 16.  */
    inst.error = _("immediate value out of range, expected range [0, 16]");
else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
       i.e. immbits must be in range 0 - 31.  */
    inst.error = _("immediate value out of range, expected range [1, 32]");
/* NOTE(review): final else-branch structure lost; these two lines
   perform the actual encoding.  */
    inst.instruction |= (immbits & 1) << 5;   /* low bit of immbits */
    inst.instruction |= (immbits >> 1);       /* remaining bits */

/* Wrappers: encode the destination register then delegate to
   vfp_conv with the appropriate source width (delegating call lost
   in extraction).  */
do_vfp_sp_conv_16 (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

do_vfp_dp_conv_16 (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);

do_vfp_sp_conv_32 (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

do_vfp_dp_conv_32 (void)
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
/* FPA instructions.  Also in a logical order.  */

/* NOTE(review): headers/braces and the switch's default case lost;
   an FPA two-register encoder (presumably do_fpa_cmp — confirm) then
   do_fpa_ldmstm follow.  */

/* Two-register FPA compare-style encoding: op0 bits 16-19, op1 bits 0-3.  */
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg;

/* do_fpa_ldmstm: FPA load/store multiple.  The register count (1-4)
   is encoded in the CP_T_X/CP_T_Y bits; count 4 is encoded as 0.  */
do_fpa_ldmstm (void)
inst.instruction |= inst.operands[0].reg << 12;
switch (inst.operands[1].imm)
  case 1: inst.instruction |= CP_T_X; break;
  case 2: inst.instruction |= CP_T_Y; break;
  case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
  /* NOTE(review): 'case 4' / default handling lost here.  */

if (inst.instruction & (PRE_INDEX | INDEX_UP))
    /* The instruction specified "ea" or "fd", so we can only accept
       [Rn]{!}.  The instruction does not really support stacking or
       unstacking, so we have to emulate these by setting appropriate
       bits and offsets.  */
    constraint (inst.relocs[0].exp.X_op != O_constant
		|| inst.relocs[0].exp.X_add_number != 0,
		_("this instruction does not support indexing"));

    /* Each FPA register is 12 bytes, so the synthetic offset is
       12 * count.  */
    if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
      inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

    if (!(inst.instruction & INDEX_UP))
      inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

    if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	/* Descending-stack emulation: switch to post-indexed form.  */
	inst.operands[2].preind = 0;
	inst.operands[2].postind = 1;

encode_arm_cp_address (2, TRUE, TRUE, 0);
/* iWMMXt instructions: strictly in alphabetical order.  */

/* NOTE(review): headers/braces lost; tokens preserved.  */

/* TANDC/TORC/TEXTRC-style: destination must be r15 (flags).  */
do_iwmmxt_tandorc (void)
constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));

/* TEXTRC: Rd bits 12-15, lane immediate bits 0-2.  */
do_iwmmxt_textrc (void)
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].imm;

/* TEXTRM: Rd bits 12-15, wRn bits 16-19, lane bits 0-2.  */
do_iwmmxt_textrm (void)
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].imm;

/* TINSR: wRd bits 16-19, Rn bits 12-15, lane bits 0-2.  */
do_iwmmxt_tinsr (void)
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].imm;

/* TMIA: accumulator bits 5-7, Rm bits 0-3, Rs bits 12-15.  */
do_iwmmxt_tmia (void)
inst.instruction |= inst.operands[0].reg << 5;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 12;

/* WALIGNI: wRd, wRn, wRm plus a 3-bit alignment immediate at bit 20.  */
do_iwmmxt_waligni (void)
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.instruction |= inst.operands[3].imm << 20;

/* WMERGE: like WALIGNI but the immediate sits at bit 21.  */
do_iwmmxt_wmerge (void)
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.instruction |= inst.operands[3].imm << 21;

do_iwmmxt_wmov (void)
/* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[1].reg;   /* source repeated in Rm slot */
/* NOTE(review): headers/braces, local declarations and the
   if/else around the Thumb-vs-ARM reloc choice were lost; tokens
   preserved.  */

/* WLDRB/WLDRH/WSTRB/WSTRH: scaled-by-2 coprocessor offset; reloc
   differs between Thumb and ARM encodings.  */
do_iwmmxt_wldstbh (void)
inst.instruction |= inst.operands[0].reg << 12;
/* Thumb encoding (condition lost in extraction):  */
reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
/* ARM encoding:  */
reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
encode_arm_cp_address (1, TRUE, FALSE, reloc);

do_iwmmxt_wldstw (void)
/* RIWR_RIWC clears .isreg for a control register.  */
if (!inst.operands[0].isreg)
    /* Control-register form is unconditional (0xF condition).  */
    constraint (inst.cond != COND_ALWAYS, BAD_COND);
    inst.instruction |= 0xf0000000;

inst.instruction |= inst.operands[0].reg << 12;
encode_arm_cp_address (1, TRUE, TRUE, 0);

/* WLDRD/WSTRD: iWMMXt2 adds a register-offset form with its own
   encoding; otherwise fall back to a coprocessor address.  */
do_iwmmxt_wldstd (void)
inst.instruction |= inst.operands[0].reg << 12;
if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
    && inst.operands[1].immisreg)
    inst.instruction &= ~0x1a000ff;        /* clear addressing bits */
    inst.instruction |= (0xfU << 28);      /* unconditional encoding */
    if (inst.operands[1].preind)
      inst.instruction |= PRE_INDEX;
    if (!inst.operands[1].negative)
      inst.instruction |= INDEX_UP;
    if (inst.operands[1].writeback)
      inst.instruction |= WRITE_BACK;
    inst.instruction |= inst.operands[1].reg << 16;
    inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
    inst.instruction |= inst.operands[1].imm;
/* NOTE(review): 'else' lost — plain coprocessor-address form:  */
encode_arm_cp_address (1, TRUE, FALSE, 0);
/* NOTE(review): headers/braces, the 'if (inst.operands[2].isreg)'
   then-branch, several case labels and 'return'/'break' lines were
   lost in this extraction; tokens preserved.  */

/* WSHUFH: 8-bit shuffle immediate split into bits 20-23 and 0-3.  */
do_iwmmxt_wshufh (void)
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
inst.instruction |= (inst.operands[2].imm & 0x0f);

do_iwmmxt_wzero (void)
/* WZERO reg is an alias for WANDN reg, reg, reg.  */
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[0].reg << 16;

/* Shift instructions taking either a register or a 5-bit immediate
   third operand; the immediate form requires iWMMXt2, and #0 shifts
   are rewritten as equivalent rotations/moves.  */
do_iwmmxt_wrwrwr_or_imm5 (void)
if (inst.operands[2].isreg)
/* NOTE(review): register-form encoding branch lost here.  */
/* Immediate form (iWMMXt2 only):  */
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
	    _("immediate operand requires iWMMXt2"));

if (inst.operands[2].imm == 0)
    switch ((inst.instruction >> 20) & 0xf)
	/* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	inst.operands[2].imm = 16;
	inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);

	/* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	inst.operands[2].imm = 32;
	inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);

	/* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	wrn = (inst.instruction >> 16) & 0xf;
	inst.instruction &= 0xff0fff0f;
	inst.instruction |= wrn;
	/* Bail out here; the instruction is now assembled.  */

/* Map 32 -> 0, etc.  */
inst.operands[2].imm &= 0x1f;
inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

/* NOTE(review): headers/braces lost; tokens preserved.  */

do_mav_triple (void)
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 12;

/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

/* (presumably do_mav_quad — header lost).  */
inst.instruction |= inst.operands[0].reg << 5;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;
inst.instruction |= inst.operands[3].reg;

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */

/* Only the source register is a variable field here.  */
do_mav_dspsc (void)
inst.instruction |= inst.operands[1].reg << 12;

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

do_mav_shift (void)
int imm = inst.operands[2].imm;

inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;

/* Bits 0-3 of the insn should have bits 0-3 of the immediate.
   Bits 5-7 of the insn should have bits 4-6 of the immediate.
   Bit 4 should be 0.  */
imm = (imm & 0xf) | ((imm & 0x70) << 1);

inst.instruction |= imm;
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
   MIAxycc acc0,Rm,Rs.  */

/* NOTE(review): headers/braces lost; three XScale accumulator
   encoders follow (presumably do_xsc_mia, do_xsc_mar, do_xsc_mra).  */

/* MIA: Rm bits 0-3, Rs bits 12-15 (accumulator is implicit/checked
   elsewhere).  */
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 12;

/* Xscale move-accumulator-register (argument parse)
   MARcc acc0,RdLo,RdHi.  */

/* MAR: RdLo bits 12-15, RdHi bits 16-19.  */
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;

/* Xscale move-register-accumulator (argument parse)
   MRAcc RdLo,RdHi,acc0.  */

/* MRA: destination registers must differ.  */
constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

/* NOTE(review): the 'static void' header, braces, the else-branch
   opener and the 'value == 0' / 'value = 0' handling lines were lost
   in this extraction; tokens preserved.  */

encode_thumb32_shifted_operand (int i)
unsigned int value = inst.relocs[0].exp.X_add_number;
unsigned int shift = inst.operands[i].shift_kind;

constraint (inst.operands[i].immisreg,
	    _("shift by register not allowed in thumb mode"));
inst.instruction |= inst.operands[i].reg;
if (shift == SHIFT_RRX)
  /* RRX is encoded as ROR with a zero shift amount.  */
  inst.instruction |= SHIFT_ROR << 4;
/* NOTE(review): else-branch for the immediate-shift case:  */
    constraint (inst.relocs[0].exp.X_op != O_constant,
		_("expression too complex"));

    constraint (value > 32
		|| (value == 32 && (shift == SHIFT_LSL
				    || shift == SHIFT_ROR)),
		_("shift expression is too large"));

    /* NOTE(review): handling for value == 0 lost here.  */
    else if (value == 32)
      /* LSR/ASR #32 are encoded with a zero immediate (line lost).  */

      inst.instruction |= shift << 4;
      /* Split the 5-bit amount: imm3 in bits 12-14, imm2 in bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

/* NOTE(review): 'static void' header, braces and a few interior
   branch lines (e.g. the positive/negative offset split) were lost
   in this extraction; tokens preserved.  */

encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

constraint (!inst.operands[i].isreg,
	    _("Instruction does not support =N addresses"));

inst.instruction |= inst.operands[i].reg << 16;   /* Rn */
if (inst.operands[i].immisreg)
    /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
    constraint (is_pc, BAD_PC_ADDRESSING);
    constraint (is_t || is_d, _("cannot use register index with this instruction"));
    constraint (inst.operands[i].negative,
		_("Thumb does not support negative register indexing"));
    constraint (inst.operands[i].postind,
		_("Thumb does not support register post-indexing"));
    constraint (inst.operands[i].writeback,
		_("Thumb does not support register indexing with writeback"));
    constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		_("Thumb supports only LSL in shifted register indexing"));

    inst.instruction |= inst.operands[i].imm;     /* Rm */
    if (inst.operands[i].shifted)
	constraint (inst.relocs[0].exp.X_op != O_constant,
		    _("expression too complex"));
	/* LSL amount limited to 0-3 in this encoding.  */
	constraint (inst.relocs[0].exp.X_add_number < 0
		    || inst.relocs[0].exp.X_add_number > 3,
		    _("shift out of range"));
	inst.instruction |= inst.relocs[0].exp.X_add_number << 4;

    inst.relocs[0].type = BFD_RELOC_UNUSED;
else if (inst.operands[i].preind)
    /* Pre-indexed immediate form.  */
    constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
    constraint (is_t && inst.operands[i].writeback,
		_("cannot use writeback with this instruction"));
    constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		BAD_PC_ADDRESSING);

    /* NOTE(review): the branch condition selecting the 12-bit
       positive-offset encoding vs the 8-bit form was lost here.  */
      inst.instruction |= 0x01000000;          /* U=1, 12-bit offset form */
      if (inst.operands[i].writeback)
	inst.instruction |= 0x00200000;        /* W bit */

      inst.instruction |= 0x00000c00;          /* 8-bit offset form, P=1 U=1 */
      if (inst.operands[i].writeback)
	inst.instruction |= 0x00000100;        /* W bit of the 8-bit form */

    inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
else if (inst.operands[i].postind)
    gas_assert (inst.operands[i].writeback);
    constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
    constraint (is_t, _("cannot use post-indexing with this instruction"));

    /* NOTE(review): the T-form branch appears truncated here.  */
      inst.instruction |= 0x00200000;          /* W bit */

    inst.instruction |= 0x00000900;            /* post-indexed 8-bit form */
    inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
else /* unindexed - only for coprocessor */
  inst.error = _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1) (comment truncated in extraction).
   Also contains several pseudo-instructions used during relaxation.  */
/* Each X(mnemonic, 16-bit opcode, 32-bit opcode) row is expanded three
   times below: once for the enum of codes, once for the 16-bit opcode
   table and once for the 32-bit opcode table.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* NOTE(review): the '#undef X' lines between the three expansions were
   lost in this extraction.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
10926 /* Thumb instruction encoders, in alphabetical order. */
/* ADDW or SUBW.  */

/* NOTE(review): 'static void', braces, the locals declaration and the
   'if (Rn == REG_SP) ... else' structure around the two checks were
   lost in this extraction; tokens preserved.  */

do_t_add_sub_w (void)

Rd = inst.operands[0].reg;
Rn = inst.operands[1].reg;

/* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
   is the SP-{plus,minus}-immediate form of the instruction.  */
/* SP-relative form: Rd may not be PC.  */
constraint (Rd == REG_PC, BAD_PC);
/* General form: Rd must be a valid Thumb-2 register.  */
reject_bad_reg (Rd);

inst.instruction |= (Rn << 16) | (Rd << 8);
inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
/* NOTE(review): do_t_add_sub -- encode Thumb ADD/SUB in all its forms:
   narrow 16-bit (incl. SP/PC-relative special cases inc_sp/dec_sp/add_sp/
   add_pc), wide 32-bit immediate (T32_IMMEDIATE / T32_ADD_IMM relocs),
   SUBS PC,LR,#const (T2_SUBS_PC_LR), ADDW/SUBW fallback when Rs==PC, and
   the register-operand forms with shifted-operand encoding.  Damaged
   extraction: statements split across lines, source line numbers fused in,
   and structural lines (braces, `else`, `static void`, declarations)
   missing.  Byte-identical; comments only.  Recover the full body from
   gas/config/tc-arm.c before editing logic.  */
10949 /* Parse an add or subtract instruction. We get here with inst.instruction
10950 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10953 do_t_add_sub (void)
10957 Rd
= inst
.operands
[0].reg
;
10958 Rs
= (inst
.operands
[1].present
10959 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10960 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10963 set_pred_insn_type_last ();
/* Unified-syntax path: choose narrow vs wide encodings.  */
10965 if (unified_syntax
)
10968 bfd_boolean narrow
;
10971 flags
= (inst
.instruction
== T_MNEM_adds
10972 || inst
.instruction
== T_MNEM_subs
);
10974 narrow
= !in_pred_block ();
10976 narrow
= in_pred_block ();
10977 if (!inst
.operands
[2].isreg
)
10981 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10982 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10984 add
= (inst
.instruction
== T_MNEM_add
10985 || inst
.instruction
== T_MNEM_adds
);
10987 if (inst
.size_req
!= 4)
10989 /* Attempt to use a narrow opcode, with relaxation if
10991 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10992 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10993 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10994 opcode
= T_MNEM_add_sp
;
10995 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10996 opcode
= T_MNEM_add_pc
;
10997 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
11000 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
11002 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
11006 inst
.instruction
= THUMB_OP16(opcode
);
11007 inst
.instruction
|= (Rd
<< 4) | Rs
;
11008 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11009 || (inst
.relocs
[0].type
11010 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
11012 if (inst
.size_req
== 2)
11013 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11015 inst
.relax
= opcode
;
11019 constraint (inst
.size_req
== 2, BAD_HIREG
);
11021 if (inst
.size_req
== 4
11022 || (inst
.size_req
!= 2 && !opcode
))
11024 constraint ((inst
.relocs
[0].type
11025 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
11026 && (inst
.relocs
[0].type
11027 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
11028 THUMB1_RELOC_ONLY
);
/* SUBS PC, LR, #const special case (exception return).  */
11031 constraint (add
, BAD_PC
);
11032 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
11033 _("only SUBS PC, LR, #const allowed"));
11034 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11035 _("expression too complex"));
11036 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11037 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
11038 _("immediate value out of range"));
11039 inst
.instruction
= T2_SUBS_PC_LR
11040 | inst
.relocs
[0].exp
.X_add_number
;
11041 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11044 else if (Rs
== REG_PC
)
11046 /* Always use addw/subw. */
11047 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11048 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11052 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11053 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11056 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11058 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11060 inst
.instruction
|= Rd
<< 8;
11061 inst
.instruction
|= Rs
<< 16;
/* Register-operand form (operand 2 is a register).  */
11066 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11067 unsigned int shift
= inst
.operands
[2].shift_kind
;
11069 Rn
= inst
.operands
[2].reg
;
11070 /* See if we can do this with a 16-bit instruction. */
11071 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11073 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11078 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11079 || inst
.instruction
== T_MNEM_add
)
11081 : T_OPCODE_SUB_R3
);
11082 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11086 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11088 /* Thumb-1 cores (except v6-M) require at least one high
11089 register in a narrow non flag setting add. */
11090 if (Rd
> 7 || Rn
> 7
11091 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11092 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11099 inst
.instruction
= T_OPCODE_ADD_HI
;
11100 inst
.instruction
|= (Rd
& 8) << 4;
11101 inst
.instruction
|= (Rd
& 7);
11102 inst
.instruction
|= Rn
<< 3;
/* Wide (32-bit) register form with optional shifted operand.  */
11108 constraint (Rd
== REG_PC
, BAD_PC
);
11109 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11110 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11111 constraint (Rs
== REG_PC
, BAD_PC
);
11112 reject_bad_reg (Rn
);
11114 /* If we get here, it can't be done in 16 bits. */
11115 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11116 _("shift must be constant"));
11117 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11118 inst
.instruction
|= Rd
<< 8;
11119 inst
.instruction
|= Rs
<< 16;
11120 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11121 _("shift value over 3 not allowed in thumb mode"));
11122 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11123 _("only LSL shift allowed in thumb mode"));
11124 encode_thumb32_shifted_operand (2);
/* Non-unified (divided) syntax path below.  */
11129 constraint (inst
.instruction
== T_MNEM_adds
11130 || inst
.instruction
== T_MNEM_subs
,
11133 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11135 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11136 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11139 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11140 ? 0x0000 : 0x8000);
11141 inst
.instruction
|= (Rd
<< 4) | Rs
;
11142 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11146 Rn
= inst
.operands
[2].reg
;
11147 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11149 /* We now have Rd, Rs, and Rn set to registers. */
11150 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11152 /* Can't do this for SUB. */
11153 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11154 inst
.instruction
= T_OPCODE_ADD_HI
;
11155 inst
.instruction
|= (Rd
& 8) << 4;
11156 inst
.instruction
|= (Rd
& 7);
11158 inst
.instruction
|= Rn
<< 3;
11160 inst
.instruction
|= Rs
<< 3;
11162 constraint (1, _("dest must overlap one source register"));
11166 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11167 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11168 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
/* NOTE(review): do_t_adr -- encode Thumb ADR: either defer to relaxation
   (16-bit form with inst.relax set), emit the 32-bit form with a
   BFD_RELOC_ARM_T32_ADD_PC12 pc-relative reloc, or emit the 16-bit form
   with BFD_RELOC_ARM_THUMB_ADD and a -4 PC adjustment.  The trailing
   O_symbol check adds 1 when the target is a defined Thumb function
   (sets the Thumb bit).  Damaged extraction: statements split across
   lines, line numbers fused in, some lines missing.  Byte-identical;
   comments only.  */
11178 Rd
= inst
.operands
[0].reg
;
11179 reject_bad_reg (Rd
);
11181 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11183 /* Defer to section relaxation. */
11184 inst
.relax
= inst
.instruction
;
11185 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11186 inst
.instruction
|= Rd
<< 4;
11188 else if (unified_syntax
&& inst
.size_req
!= 2)
11190 /* Generate a 32-bit opcode. */
11191 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11192 inst
.instruction
|= Rd
<< 8;
11193 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11194 inst
.relocs
[0].pc_rel
= 1;
11198 /* Generate a 16-bit opcode. */
11199 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11200 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11201 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11202 inst
.relocs
[0].pc_rel
= 1;
11203 inst
.instruction
|= Rd
<< 4;
11206 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11207 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11208 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11209 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11210 inst
.relocs
[0].exp
.X_add_number
+= 1;
/* NOTE(review): do_t_arit3 -- non-commutative 3-operand Thumb arithmetic
   (e.g. SBC): immediate always takes the 32-bit T32_IMMEDIATE form;
   register form tries a 16-bit encoding when all regs are low and the
   pred-block/narrow conditions allow, else falls back to THUMB_OP32 with
   a shifted operand.  Non-unified path requires Rd == Rs.  Damaged
   extraction: statements split across lines, line numbers fused in,
   structural lines missing.  Byte-identical; comments only.  */
11213 /* Arithmetic instructions for which there is just one 16-bit
11214 instruction encoding, and it allows only two low registers.
11215 For maximal compatibility with ARM syntax, we allow three register
11216 operands even when Thumb-32 instructions are not available, as long
11217 as the first two are identical. For instance, both "sbc r0,r1" and
11218 "sbc r0,r0,r1" are allowed. */
11224 Rd
= inst
.operands
[0].reg
;
11225 Rs
= (inst
.operands
[1].present
11226 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11227 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11228 Rn
= inst
.operands
[2].reg
;
11230 reject_bad_reg (Rd
);
11231 reject_bad_reg (Rs
);
11232 if (inst
.operands
[2].isreg
)
11233 reject_bad_reg (Rn
);
11235 if (unified_syntax
)
11237 if (!inst
.operands
[2].isreg
)
11239 /* For an immediate, we always generate a 32-bit opcode;
11240 section relaxation will shrink it later if possible. */
11241 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11242 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11243 inst
.instruction
|= Rd
<< 8;
11244 inst
.instruction
|= Rs
<< 16;
11245 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11249 bfd_boolean narrow
;
11251 /* See if we can do this with a 16-bit instruction. */
11252 if (THUMB_SETS_FLAGS (inst
.instruction
))
11253 narrow
= !in_pred_block ();
11255 narrow
= in_pred_block ();
11257 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11259 if (inst
.operands
[2].shifted
)
11261 if (inst
.size_req
== 4)
11267 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11268 inst
.instruction
|= Rd
;
11269 inst
.instruction
|= Rn
<< 3;
11273 /* If we get here, it can't be done in 16 bits. */
11274 constraint (inst
.operands
[2].shifted
11275 && inst
.operands
[2].immisreg
,
11276 _("shift must be constant"));
11277 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11278 inst
.instruction
|= Rd
<< 8;
11279 inst
.instruction
|= Rs
<< 16;
11280 encode_thumb32_shifted_operand (2);
11285 /* On its face this is a lie - the instruction does set the
11286 flags. However, the only supported mnemonic in this mode
11287 says it doesn't. */
11288 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11290 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11291 _("unshifted register required"));
11292 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11293 constraint (Rd
!= Rs
,
11294 _("dest and source1 must be the same register"));
11296 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11297 inst
.instruction
|= Rd
;
11298 inst
.instruction
|= Rn
<< 3;
/* NOTE(review): do_t_arit3c -- same as do_t_arit3 but for commutative
   operations (e.g. ADC), so the 16-bit form may be used when Rd matches
   either source (note the extra Rd/Rs branch emitting `|= Rs << 3`).
   Ends with the "dest must overlap one source register" constraint for
   the non-unified path.  Damaged extraction: statements split across
   lines, line numbers fused in, structural lines missing.
   Byte-identical; comments only.  */
11302 /* Similarly, but for instructions where the arithmetic operation is
11303 commutative, so we can allow either of them to be different from
11304 the destination operand in a 16-bit instruction. For instance, all
11305 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11312 Rd
= inst
.operands
[0].reg
;
11313 Rs
= (inst
.operands
[1].present
11314 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11315 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11316 Rn
= inst
.operands
[2].reg
;
11318 reject_bad_reg (Rd
);
11319 reject_bad_reg (Rs
);
11320 if (inst
.operands
[2].isreg
)
11321 reject_bad_reg (Rn
);
11323 if (unified_syntax
)
11325 if (!inst
.operands
[2].isreg
)
11327 /* For an immediate, we always generate a 32-bit opcode;
11328 section relaxation will shrink it later if possible. */
11329 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11330 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11331 inst
.instruction
|= Rd
<< 8;
11332 inst
.instruction
|= Rs
<< 16;
11333 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11337 bfd_boolean narrow
;
11339 /* See if we can do this with a 16-bit instruction. */
11340 if (THUMB_SETS_FLAGS (inst
.instruction
))
11341 narrow
= !in_pred_block ();
11343 narrow
= in_pred_block ();
11345 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11347 if (inst
.operands
[2].shifted
)
11349 if (inst
.size_req
== 4)
11356 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11357 inst
.instruction
|= Rd
;
11358 inst
.instruction
|= Rn
<< 3;
11363 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11364 inst
.instruction
|= Rd
;
11365 inst
.instruction
|= Rs
<< 3;
11370 /* If we get here, it can't be done in 16 bits. */
11371 constraint (inst
.operands
[2].shifted
11372 && inst
.operands
[2].immisreg
,
11373 _("shift must be constant"));
11374 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11375 inst
.instruction
|= Rd
<< 8;
11376 inst
.instruction
|= Rs
<< 16;
11377 encode_thumb32_shifted_operand (2);
11382 /* On its face this is a lie - the instruction does set the
11383 flags. However, the only supported mnemonic in this mode
11384 says it doesn't. */
11385 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11387 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11388 _("unshifted register required"));
11389 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11391 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11392 inst
.instruction
|= Rd
;
11395 inst
.instruction
|= Rn
<< 3;
11397 inst
.instruction
|= Rs
<< 3;
11399 constraint (1, _("dest must overlap one source register"));
/* NOTE(review): bit-field clear (presumably do_t_bfc -- TODO confirm
   against tc-arm.c): converts <lsb,width> operands into the LSB/MSB
   encoding (msb-1 in the low bits, lsb split across bits 12..14 and
   6..7).  Damaged extraction: statements split across lines, line
   numbers fused in, signature/braces missing.  Byte-identical;
   comments only.  */
11407 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11408 constraint (msb
> 32, _("bit-field extends past end of register"));
11409 /* The instruction encoding stores the LSB and MSB,
11410 not the LSB and width. */
11411 Rd
= inst
.operands
[0].reg
;
11412 reject_bad_reg (Rd
);
11413 inst
.instruction
|= Rd
<< 8;
11414 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11415 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11416 inst
.instruction
|= msb
- 1;
/* NOTE(review): bit-field insert (presumably do_t_bfi -- TODO confirm):
   same LSB/MSB packing as the clear form above, plus Rn in bits 16..19;
   "#0 in second position" selects the BFC alias (Rm = PC) per the
   comment.  Damaged extraction: statements split across lines, line
   numbers fused in, structural lines missing.  Byte-identical;
   comments only.  */
11425 Rd
= inst
.operands
[0].reg
;
11426 reject_bad_reg (Rd
);
11428 /* #0 in second position is alternative syntax for bfc, which is
11429 the same instruction but with REG_PC in the Rm field. */
11430 if (!inst
.operands
[1].isreg
)
11434 Rn
= inst
.operands
[1].reg
;
11435 reject_bad_reg (Rn
);
11438 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11439 constraint (msb
> 32, _("bit-field extends past end of register"));
11440 /* The instruction encoding stores the LSB and MSB,
11441 not the LSB and width. */
11442 inst
.instruction
|= Rd
<< 8;
11443 inst
.instruction
|= Rn
<< 16;
11444 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11445 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11446 inst
.instruction
|= msb
- 1;
/* NOTE(review): bit-field extract (presumably do_t_bfx: SBFX/UBFX --
   TODO confirm): here the width operand is encoded directly as
   operands[3].imm - 1 rather than as an MSB.  Damaged extraction:
   statements split across lines, line numbers fused in, structural
   lines missing.  Byte-identical; comments only.  */
11454 Rd
= inst
.operands
[0].reg
;
11455 Rn
= inst
.operands
[1].reg
;
11457 reject_bad_reg (Rd
);
11458 reject_bad_reg (Rn
);
11460 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11461 _("bit-field extends past end of register"));
11462 inst
.instruction
|= Rd
<< 8;
11463 inst
.instruction
|= Rn
<< 16;
11464 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11465 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11466 inst
.instruction
|= inst
.operands
[3].imm
- 1;
/* NOTE(review): do_t_blx -- register operand selects BLX(2) (reg << 3),
   otherwise BLX(1) is forced to opcode 0xf000e800 and branch-encoded
   with BFD_RELOC_THUMB_PCREL_BLX.  Damaged extraction: statements split
   across lines, line numbers fused in, braces/`else` missing.
   Byte-identical; comments only.  */
11469 /* ARM V5 Thumb BLX (argument parse)
11470 BLX <target_addr> which is BLX(1)
11471 BLX <Rm> which is BLX(2)
11472 Unfortunately, there are two different opcodes for this mnemonic.
11473 So, the insns[].value is not used, and the code here zaps values
11474 into inst.instruction.
11476 ??? How to take advantage of the additional two bits of displacement
11477 available in Thumb32 mode? Need new relocation? */
11482 set_pred_insn_type_last ();
11484 if (inst
.operands
[0].isreg
)
11486 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11487 /* We have a register, so this is BLX(2). */
11488 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11492 /* No register. This must be BLX(1). */
11493 inst
.instruction
= 0xf000e800;
11494 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
/* NOTE(review): do_t_branch -- encode Thumb B/Bcond: conditional
   branches inside IT blocks become unconditional; wide forms use
   BRANCH25 (unconditional) or BRANCH20 (conditional, needs v6t2);
   narrow forms use BRANCH12/BRANCH9; relaxation is enabled for
   unified syntax when no 16-bit size was forced.  Damaged extraction:
   statements split across lines, line numbers fused in, structural
   lines missing.  Byte-identical; comments only.  */
11503 bfd_reloc_code_real_type reloc
;
11506 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
11508 if (in_pred_block ())
11510 /* Conditional branches inside IT blocks are encoded as unconditional
11512 cond
= COND_ALWAYS
;
11517 if (cond
!= COND_ALWAYS
)
11518 opcode
= T_MNEM_bcond
;
11520 opcode
= inst
.instruction
;
11523 && (inst
.size_req
== 4
11524 || (inst
.size_req
!= 2
11525 && (inst
.operands
[0].hasreloc
11526 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11528 inst
.instruction
= THUMB_OP32(opcode
);
11529 if (cond
== COND_ALWAYS
)
11530 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11533 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11534 _("selected architecture does not support "
11535 "wide conditional branch instruction"));
11537 gas_assert (cond
!= 0xF);
11538 inst
.instruction
|= cond
<< 22;
11539 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11544 inst
.instruction
= THUMB_OP16(opcode
);
11545 if (cond
== COND_ALWAYS
)
11546 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11549 inst
.instruction
|= cond
<< 8;
11550 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11552 /* Allow section relaxation. */
11553 if (unified_syntax
&& inst
.size_req
!= 2)
11554 inst
.relax
= opcode
;
11556 inst
.relocs
[0].type
= reloc
;
11557 inst
.relocs
[0].pc_rel
= 1;
/* NOTE(review): do_t_bkpt_hlt1 -- shared worker for Thumb BKPT/HLT:
   checks the condition is AL, range-checks the optional immediate, ORs
   it into the opcode, and marks the insn IT-neutral.  The two trailing
   call fragments (`do_t_bkpt_hlt1 (63)` / `(255)`) are the bodies of
   the HLT and BKPT handlers whose signatures were lost by the damaged
   extraction.  Byte-identical; comments only.  */
11560 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11561 between the two is the maximum immediate allowed - which is passed in
11564 do_t_bkpt_hlt1 (int range
)
11566 constraint (inst
.cond
!= COND_ALWAYS
,
11567 _("instruction is always unconditional"));
11568 if (inst
.operands
[0].present
)
11570 constraint (inst
.operands
[0].imm
> range
,
11571 _("immediate value out of range"));
11572 inst
.instruction
|= inst
.operands
[0].imm
;
11575 set_pred_insn_type (NEUTRAL_IT_INSN
);
11581 do_t_bkpt_hlt1 (63);
11587 do_t_bkpt_hlt1 (255);
/* NOTE(review): do_t_branch23 -- encode BL/BLX-style 23-bit branch;
   rewrites a PLT reloc back to BRANCH23 for compatibility, and under
   OBJ_COFF redirects calls to non-Thumb functions to their
   (interfacearm) entry point via find_real_start.  Damaged extraction:
   statements split across lines, line numbers fused in, some lines
   missing.  Byte-identical; comments only.  */
11591 do_t_branch23 (void)
11593 set_pred_insn_type_last ();
11594 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11596 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11597 this file. We used to simply ignore the PLT reloc type here --
11598 the branch encoding is now needed to deal with TLSCALL relocs.
11599 So if we see a PLT reloc now, put it back to how it used to be to
11600 keep the preexisting behaviour. */
11601 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11602 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11604 #if defined(OBJ_COFF)
11605 /* If the destination of the branch is a defined symbol which does not have
11606 the THUMB_FUNC attribute, then we must be calling a function which has
11607 the (interfacearm) attribute. We look for the Thumb entry point to that
11608 function and change the branch to refer to that function instead. */
11609 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11610 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11611 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11612 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11613 inst
.relocs
[0].exp
.X_add_symbol
11614 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
/* NOTE(review): BX handler fragment (presumably do_t_bx -- TODO
   confirm): Rm goes in bits 3..6; the FIXME notes that BX PC alignment
   is unchecked.  Damaged extraction; byte-identical; comments only.  */
11621 set_pred_insn_type_last ();
11622 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11623 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11624 should cause the alignment to be checked once it is known. This is
11625 because BX PC only works if the instruction is word aligned. */
/* NOTE(review): BXJ handler fragment (presumably do_t_bxj -- TODO
   confirm): Rm validated then placed in bits 16..19.  Damaged
   extraction; byte-identical; comments only.  */
11633 set_pred_insn_type_last ();
11634 Rm
= inst
.operands
[0].reg
;
11635 reject_bad_reg (Rm
);
11636 inst
.instruction
|= Rm
<< 16;
/* NOTE(review): CLZ-style handler fragment (presumably do_t_clz -- TODO
   confirm): Rd in bits 8..11, Rm duplicated into bits 16..19 and 0..3,
   as the visible encoding shows.  Damaged extraction; byte-identical;
   comments only.  */
11645 Rd
= inst
.operands
[0].reg
;
11646 Rm
= inst
.operands
[1].reg
;
11648 reject_bad_reg (Rd
);
11649 reject_bad_reg (Rm
);
11651 inst
.instruction
|= Rd
<< 8;
11652 inst
.instruction
|= Rm
<< 16;
11653 inst
.instruction
|= Rm
;
/* NOTE(review): fragments of three small handlers (line numbers 11659,
   11665-11666, 11672-11693 suggest a bare OUTSIDE_PRED_INSN handler,
   do_t_cps, and do_t_cpsi -- TODO confirm against tc-arm.c).  The CPSI
   path builds the 32-bit 0xf3af8000 form (imod/M bits) when a second
   operand or 4-byte size is requested on v6+, else checks the 'A'-form
   and 2-argument restrictions for the 16-bit form.  Damaged extraction:
   statements split across lines, handler boundaries lost.
   Byte-identical; comments only.  */
11659 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11665 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11666 inst
.instruction
|= inst
.operands
[0].imm
;
11672 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11674 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11675 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11677 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11678 inst
.instruction
= 0xf3af8000;
11679 inst
.instruction
|= imod
<< 9;
11680 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11681 if (inst
.operands
[1].present
)
11682 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11686 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11687 && (inst
.operands
[0].imm
& 4),
11688 _("selected processor does not support 'A' form "
11689 "of this instruction"));
11690 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11691 _("Thumb does not support the 2-argument "
11692 "form of this instruction"));
11693 inst
.instruction
|= inst
.operands
[0].imm
;
/* NOTE(review): do_t_cpy -- CPY encoded as MOV: 32-bit T_MNEM_mov form
   when .w was requested, else the 16-bit high-register MOV layout
   (Rd split across bit 7 and bits 0..2, Rm in bits 3..6).  Damaged
   extraction; byte-identical; comments only.  */
11697 /* THUMB CPY instruction (argument parse). */
11702 if (inst
.size_req
== 4)
11704 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11705 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11706 inst
.instruction
|= inst
.operands
[1].reg
;
11710 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11711 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11712 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
/* NOTE(review): CBZ/CBNZ handler fragment (presumably do_t_cbz -- TODO
   confirm): low register only, pc-relative BRANCH7 reloc.  Damaged
   extraction; byte-identical; comments only.  */
11719 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11720 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11721 inst
.instruction
|= inst
.operands
[0].reg
;
11722 inst
.relocs
[0].pc_rel
= 1;
11723 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
/* NOTE(review): single-statement handler fragment (line 11729) -- ORs
   the immediate operand into the opcode; presumably do_t_dbg or a
   similar hint handler -- TODO confirm.  Byte-identical; comments
   only.  */
11729 inst
.instruction
|= inst
.operands
[0].imm
;
/* NOTE(review): three-register handler (presumably do_t_div: SDIV/UDIV
   -- TODO confirm): Rn defaults to Rd when operand 1 is omitted; Rd in
   bits 8..11, Rn in 16..19, Rm in 0..3.  Damaged extraction;
   byte-identical; comments only.  */
11735 unsigned Rd
, Rn
, Rm
;
11737 Rd
= inst
.operands
[0].reg
;
11738 Rn
= (inst
.operands
[1].present
11739 ? inst
.operands
[1].reg
: Rd
);
11740 Rm
= inst
.operands
[2].reg
;
11742 reject_bad_reg (Rd
);
11743 reject_bad_reg (Rn
);
11744 reject_bad_reg (Rm
);
11746 inst
.instruction
|= Rd
<< 8;
11747 inst
.instruction
|= Rn
<< 16;
11748 inst
.instruction
|= Rm
;
/* NOTE(review): size-selection fragment (lines 11754-11757): picks the
   32-bit encoding when unified syntax forces .w, otherwise the 16-bit
   one.  Enclosing handler name not visible -- TODO identify in
   tc-arm.c.  Byte-identical; comments only.  */
11754 if (unified_syntax
&& inst
.size_req
== 4)
11755 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11757 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
/* NOTE(review): IT-block setup (do_t_it) followed by a VPT-block setup
   fragment: records mask/cond into now_pred, inverts the mask bits for
   negative conditions (block_length derived from trailing zero bits of
   the mask), then re-packs mask and cond into the opcode.  The trailing
   lines (11807-11813) set up a vector (VPT) predicated block; the
   enclosing function boundary was lost by the extraction -- TODO
   confirm whether they belong to a separate MVE handler.
   Byte-identical; comments only.  */
11763 unsigned int cond
= inst
.operands
[0].imm
;
11765 set_pred_insn_type (IT_INSN
);
11766 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
11767 now_pred
.cc
= cond
;
11768 now_pred
.warn_deprecated
= FALSE
;
11769 now_pred
.type
= SCALAR_PRED
;
11771 /* If the condition is a negative condition, invert the mask. */
11772 if ((cond
& 0x1) == 0x0)
11774 unsigned int mask
= inst
.instruction
& 0x000f;
11776 if ((mask
& 0x7) == 0)
11778 /* No conversion needed. */
11779 now_pred
.block_length
= 1;
11781 else if ((mask
& 0x3) == 0)
11784 now_pred
.block_length
= 2;
11786 else if ((mask
& 0x1) == 0)
11789 now_pred
.block_length
= 3;
11794 now_pred
.block_length
= 4;
11797 inst
.instruction
&= 0xfff0;
11798 inst
.instruction
|= mask
;
11801 inst
.instruction
|= cond
<< 4;
11807 /* We are dealing with a vector predicated block. */
11808 set_pred_insn_type (VPT_INSN
);
11810 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
11811 | ((inst
.instruction
& 0xe000) >> 13);
11812 now_pred
.warn_deprecated
= FALSE
;
11813 now_pred
.type
= VECTOR_PRED
;
/* NOTE(review): encode_thumb2_multi -- shared encoder for push/pop and
   ldm/stm: rejects SP in the list, base-in-list-with-writeback, and
   PC/LR combinations; converts a single-register transfer into an
   equivalent str/ldr (the 0x00000b04 / 0x00000d04 / 0x00800000 /
   0x00000c04 addressing templates), otherwise ORs in the register mask,
   writeback bit and base register.  Damaged extraction: statements
   split across lines, braces and some conditions missing.
   Byte-identical; comments only.  */
11816 /* Helper function used for both push/pop and ldm/stm. */
11818 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11819 bfd_boolean writeback
)
11821 bfd_boolean load
, store
;
11823 gas_assert (base
!= -1 || !do_io
);
11824 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11825 store
= do_io
&& !load
;
11827 if (mask
& (1 << 13))
11828 inst
.error
= _("SP not allowed in register list");
11830 if (do_io
&& (mask
& (1 << base
)) != 0
11832 inst
.error
= _("having the base register in the register list when "
11833 "using write back is UNPREDICTABLE");
11837 if (mask
& (1 << 15))
11839 if (mask
& (1 << 14))
11840 inst
.error
= _("LR and PC should not both be in register list");
11842 set_pred_insn_type_last ();
11847 if (mask
& (1 << 15))
11848 inst
.error
= _("PC not allowed in register list");
11851 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11853 /* Single register transfers implemented as str/ldr. */
11856 if (inst
.instruction
& (1 << 23))
11857 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11859 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11863 if (inst
.instruction
& (1 << 23))
11864 inst
.instruction
= 0x00800000; /* ia -> [base] */
11866 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11869 inst
.instruction
|= 0xf8400000;
11871 inst
.instruction
|= 0x00100000;
11873 mask
= ffs (mask
) - 1;
11876 else if (writeback
)
11877 inst
.instruction
|= WRITE_BACK
;
11879 inst
.instruction
|= mask
;
11881 inst
.instruction
|= base
<< 16;
/* NOTE(review): do_t_ldmstm -- encode LDM/STM: tries a 16-bit stmia/
   ldmia when the register list is all-low (warning on UNKNOWN stored
   values); converts single-register lists into 16-bit str/ldr; uses
   push/pop or SP-relative str/ldr forms when the base is SP; otherwise
   widens to 32-bit and defers to encode_thumb2_multi.  The non-unified
   tail re-checks low registers and writeback semantics, warning when a
   load/store "will (not) write back the base register".  Damaged
   extraction: statements split across lines, line numbers fused in,
   braces/`else` missing.  Byte-identical; comments only.  */
11887 /* This really doesn't seem worth it. */
11888 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11889 _("expression too complex"));
11890 constraint (inst
.operands
[1].writeback
,
11891 _("Thumb load/store multiple does not support {reglist}^"));
11893 if (unified_syntax
)
11895 bfd_boolean narrow
;
11899 /* See if we can use a 16-bit instruction. */
11900 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11901 && inst
.size_req
!= 4
11902 && !(inst
.operands
[1].imm
& ~0xff))
11904 mask
= 1 << inst
.operands
[0].reg
;
11906 if (inst
.operands
[0].reg
<= 7)
11908 if (inst
.instruction
== T_MNEM_stmia
11909 ? inst
.operands
[0].writeback
11910 : (inst
.operands
[0].writeback
11911 == !(inst
.operands
[1].imm
& mask
)))
11913 if (inst
.instruction
== T_MNEM_stmia
11914 && (inst
.operands
[1].imm
& mask
)
11915 && (inst
.operands
[1].imm
& (mask
- 1)))
11916 as_warn (_("value stored for r%d is UNKNOWN"),
11917 inst
.operands
[0].reg
);
11919 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11920 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11921 inst
.instruction
|= inst
.operands
[1].imm
;
11924 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11926 /* This means 1 register in reg list one of 3 situations:
11927 1. Instruction is stmia, but without writeback.
11928 2. lmdia without writeback, but with Rn not in
11930 3. ldmia with writeback, but with Rn in reglist.
11931 Case 3 is UNPREDICTABLE behaviour, so we handle
11932 case 1 and 2 which can be converted into a 16-bit
11933 str or ldr. The SP cases are handled below. */
11934 unsigned long opcode
;
11935 /* First, record an error for Case 3. */
11936 if (inst
.operands
[1].imm
& mask
11937 && inst
.operands
[0].writeback
)
11939 _("having the base register in the register list when "
11940 "using write back is UNPREDICTABLE");
11942 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11944 inst
.instruction
= THUMB_OP16 (opcode
);
11945 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11946 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11950 else if (inst
.operands
[0] .reg
== REG_SP
)
11952 if (inst
.operands
[0].writeback
)
11955 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11956 ? T_MNEM_push
: T_MNEM_pop
);
11957 inst
.instruction
|= inst
.operands
[1].imm
;
11960 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11963 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11964 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11965 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11973 if (inst
.instruction
< 0xffff)
11974 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11976 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11977 inst
.operands
[1].imm
,
11978 inst
.operands
[0].writeback
);
11983 constraint (inst
.operands
[0].reg
> 7
11984 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11985 constraint (inst
.instruction
!= T_MNEM_ldmia
11986 && inst
.instruction
!= T_MNEM_stmia
,
11987 _("Thumb-2 instruction only valid in unified syntax"));
11988 if (inst
.instruction
== T_MNEM_stmia
)
11990 if (!inst
.operands
[0].writeback
)
11991 as_warn (_("this instruction will write back the base register"));
11992 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11993 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11994 as_warn (_("value stored for r%d is UNKNOWN"),
11995 inst
.operands
[0].reg
);
11999 if (!inst
.operands
[0].writeback
12000 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12001 as_warn (_("this instruction will write back the base register"));
12002 else if (inst
.operands
[0].writeback
12003 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12004 as_warn (_("this instruction will not write back the base register"));
12007 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12008 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12009 inst
.instruction
|= inst
.operands
[1].imm
;
/* NOTE(review): LDREX handler fragment (presumably do_t_ldrex -- TODO
   confirm): requires a plain [Rn, #imm] pre-indexed address (no
   writeback/post-index/register offset/shift/negative), rejects PC as
   base, and requests the T32 U8 offset reloc.  Damaged extraction;
   byte-identical; comments only.  */
12016 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
12017 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
12018 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
12019 || inst
.operands
[1].negative
,
12022 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
12024 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12025 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12026 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
/* NOTE(review): LDREXD handler fragment (presumably do_t_ldrexd -- TODO
   confirm): a missing second register defaults to Rt+1 (LR rejected as
   first register in that case), Rt must differ from Rt2; registers
   packed into bits 12, 8 and 16.  Damaged extraction; byte-identical;
   comments only.  */
12032 if (!inst
.operands
[1].present
)
12034 constraint (inst
.operands
[0].reg
== REG_LR
,
12035 _("r14 not allowed as first register "
12036 "when second register is omitted"));
12037 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12039 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12042 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12043 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12044 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12050 unsigned long opcode
;
12053 if (inst
.operands
[0].isreg
12054 && !inst
.operands
[0].preind
12055 && inst
.operands
[0].reg
== REG_PC
)
12056 set_pred_insn_type_last ();
12058 opcode
= inst
.instruction
;
12059 if (unified_syntax
)
12061 if (!inst
.operands
[1].isreg
)
12063 if (opcode
<= 0xffff)
12064 inst
.instruction
= THUMB_OP32 (opcode
);
12065 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12068 if (inst
.operands
[1].isreg
12069 && !inst
.operands
[1].writeback
12070 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12071 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12072 && opcode
<= 0xffff
12073 && inst
.size_req
!= 4)
12075 /* Insn may have a 16-bit form. */
12076 Rn
= inst
.operands
[1].reg
;
12077 if (inst
.operands
[1].immisreg
)
12079 inst
.instruction
= THUMB_OP16 (opcode
);
12081 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12083 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12084 reject_bad_reg (inst
.operands
[1].imm
);
12086 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12087 && opcode
!= T_MNEM_ldrsb
)
12088 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12089 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12096 if (inst
.relocs
[0].pc_rel
)
12097 opcode
= T_MNEM_ldr_pc2
;
12099 opcode
= T_MNEM_ldr_pc
;
12103 if (opcode
== T_MNEM_ldr
)
12104 opcode
= T_MNEM_ldr_sp
;
12106 opcode
= T_MNEM_str_sp
;
12108 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12112 inst
.instruction
= inst
.operands
[0].reg
;
12113 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12115 inst
.instruction
|= THUMB_OP16 (opcode
);
12116 if (inst
.size_req
== 2)
12117 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12119 inst
.relax
= opcode
;
12123 /* Definitely a 32-bit variant. */
12125 /* Warning for Erratum 752419. */
12126 if (opcode
== T_MNEM_ldr
12127 && inst
.operands
[0].reg
== REG_SP
12128 && inst
.operands
[1].writeback
== 1
12129 && !inst
.operands
[1].immisreg
)
12131 if (no_cpu_selected ()
12132 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12133 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12134 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12135 as_warn (_("This instruction may be unpredictable "
12136 "if executed on M-profile cores "
12137 "with interrupts enabled."));
12140 /* Do some validations regarding addressing modes. */
12141 if (inst
.operands
[1].immisreg
)
12142 reject_bad_reg (inst
.operands
[1].imm
);
12144 constraint (inst
.operands
[1].writeback
== 1
12145 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12148 inst
.instruction
= THUMB_OP32 (opcode
);
12149 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12150 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12151 check_ldr_r15_aligned ();
12155 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12157 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12159 /* Only [Rn,Rm] is acceptable. */
12160 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12161 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12162 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12163 || inst
.operands
[1].negative
,
12164 _("Thumb does not support this addressing mode"));
12165 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12169 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12170 if (!inst
.operands
[1].isreg
)
12171 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12174 constraint (!inst
.operands
[1].preind
12175 || inst
.operands
[1].shifted
12176 || inst
.operands
[1].writeback
,
12177 _("Thumb does not support this addressing mode"));
12178 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12180 constraint (inst
.instruction
& 0x0600,
12181 _("byte or halfword not valid for base register"));
12182 constraint (inst
.operands
[1].reg
== REG_PC
12183 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12184 _("r15 based store not allowed"));
12185 constraint (inst
.operands
[1].immisreg
,
12186 _("invalid base register for register offset"));
12188 if (inst
.operands
[1].reg
== REG_PC
)
12189 inst
.instruction
= T_OPCODE_LDR_PC
;
12190 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12191 inst
.instruction
= T_OPCODE_LDR_SP
;
12193 inst
.instruction
= T_OPCODE_STR_SP
;
12195 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12196 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12200 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12201 if (!inst
.operands
[1].immisreg
)
12203 /* Immediate offset. */
12204 inst
.instruction
|= inst
.operands
[0].reg
;
12205 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12206 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12210 /* Register offset. */
12211 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12212 constraint (inst
.operands
[1].negative
,
12213 _("Thumb does not support this addressing mode"));
12216 switch (inst
.instruction
)
12218 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12219 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12220 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12221 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12222 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12223 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12224 case 0x5600 /* ldrsb */:
12225 case 0x5e00 /* ldrsh */: break;
12229 inst
.instruction
|= inst
.operands
[0].reg
;
12230 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12231 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12237 if (!inst
.operands
[1].present
)
12239 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12240 constraint (inst
.operands
[0].reg
== REG_LR
,
12241 _("r14 not allowed here"));
12242 constraint (inst
.operands
[0].reg
== REG_R12
,
12243 _("r12 not allowed here"));
12246 if (inst
.operands
[2].writeback
12247 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12248 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12249 as_warn (_("base register written back, and overlaps "
12250 "one of transfer registers"));
12252 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12253 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12254 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12260 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12261 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12267 unsigned Rd
, Rn
, Rm
, Ra
;
12269 Rd
= inst
.operands
[0].reg
;
12270 Rn
= inst
.operands
[1].reg
;
12271 Rm
= inst
.operands
[2].reg
;
12272 Ra
= inst
.operands
[3].reg
;
12274 reject_bad_reg (Rd
);
12275 reject_bad_reg (Rn
);
12276 reject_bad_reg (Rm
);
12277 reject_bad_reg (Ra
);
12279 inst
.instruction
|= Rd
<< 8;
12280 inst
.instruction
|= Rn
<< 16;
12281 inst
.instruction
|= Rm
;
12282 inst
.instruction
|= Ra
<< 12;
12288 unsigned RdLo
, RdHi
, Rn
, Rm
;
12290 RdLo
= inst
.operands
[0].reg
;
12291 RdHi
= inst
.operands
[1].reg
;
12292 Rn
= inst
.operands
[2].reg
;
12293 Rm
= inst
.operands
[3].reg
;
12295 reject_bad_reg (RdLo
);
12296 reject_bad_reg (RdHi
);
12297 reject_bad_reg (Rn
);
12298 reject_bad_reg (Rm
);
12300 inst
.instruction
|= RdLo
<< 12;
12301 inst
.instruction
|= RdHi
<< 8;
12302 inst
.instruction
|= Rn
<< 16;
12303 inst
.instruction
|= Rm
;
12307 do_t_mov_cmp (void)
12311 Rn
= inst
.operands
[0].reg
;
12312 Rm
= inst
.operands
[1].reg
;
12315 set_pred_insn_type_last ();
12317 if (unified_syntax
)
12319 int r0off
= (inst
.instruction
== T_MNEM_mov
12320 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12321 unsigned long opcode
;
12322 bfd_boolean narrow
;
12323 bfd_boolean low_regs
;
12325 low_regs
= (Rn
<= 7 && Rm
<= 7);
12326 opcode
= inst
.instruction
;
12327 if (in_pred_block ())
12328 narrow
= opcode
!= T_MNEM_movs
;
12330 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12331 if (inst
.size_req
== 4
12332 || inst
.operands
[1].shifted
)
12335 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12336 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12337 && !inst
.operands
[1].shifted
12341 inst
.instruction
= T2_SUBS_PC_LR
;
12345 if (opcode
== T_MNEM_cmp
)
12347 constraint (Rn
== REG_PC
, BAD_PC
);
12350 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12352 warn_deprecated_sp (Rm
);
12353 /* R15 was documented as a valid choice for Rm in ARMv6,
12354 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12355 tools reject R15, so we do too. */
12356 constraint (Rm
== REG_PC
, BAD_PC
);
12359 reject_bad_reg (Rm
);
12361 else if (opcode
== T_MNEM_mov
12362 || opcode
== T_MNEM_movs
)
12364 if (inst
.operands
[1].isreg
)
12366 if (opcode
== T_MNEM_movs
)
12368 reject_bad_reg (Rn
);
12369 reject_bad_reg (Rm
);
12373 /* This is mov.n. */
12374 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12375 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12377 as_tsktsk (_("Use of r%u as a source register is "
12378 "deprecated when r%u is the destination "
12379 "register."), Rm
, Rn
);
12384 /* This is mov.w. */
12385 constraint (Rn
== REG_PC
, BAD_PC
);
12386 constraint (Rm
== REG_PC
, BAD_PC
);
12387 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12388 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12392 reject_bad_reg (Rn
);
12395 if (!inst
.operands
[1].isreg
)
12397 /* Immediate operand. */
12398 if (!in_pred_block () && opcode
== T_MNEM_mov
)
12400 if (low_regs
&& narrow
)
12402 inst
.instruction
= THUMB_OP16 (opcode
);
12403 inst
.instruction
|= Rn
<< 8;
12404 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12405 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12407 if (inst
.size_req
== 2)
12408 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12410 inst
.relax
= opcode
;
12415 constraint ((inst
.relocs
[0].type
12416 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12417 && (inst
.relocs
[0].type
12418 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12419 THUMB1_RELOC_ONLY
);
12421 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12422 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12423 inst
.instruction
|= Rn
<< r0off
;
12424 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12427 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12428 && (inst
.instruction
== T_MNEM_mov
12429 || inst
.instruction
== T_MNEM_movs
))
12431 /* Register shifts are encoded as separate shift instructions. */
12432 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12434 if (in_pred_block ())
12439 if (inst
.size_req
== 4)
12442 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12448 switch (inst
.operands
[1].shift_kind
)
12451 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12454 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12457 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12460 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12466 inst
.instruction
= opcode
;
12469 inst
.instruction
|= Rn
;
12470 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12475 inst
.instruction
|= CONDS_BIT
;
12477 inst
.instruction
|= Rn
<< 8;
12478 inst
.instruction
|= Rm
<< 16;
12479 inst
.instruction
|= inst
.operands
[1].imm
;
12484 /* Some mov with immediate shift have narrow variants.
12485 Register shifts are handled above. */
12486 if (low_regs
&& inst
.operands
[1].shifted
12487 && (inst
.instruction
== T_MNEM_mov
12488 || inst
.instruction
== T_MNEM_movs
))
12490 if (in_pred_block ())
12491 narrow
= (inst
.instruction
== T_MNEM_mov
);
12493 narrow
= (inst
.instruction
== T_MNEM_movs
);
12498 switch (inst
.operands
[1].shift_kind
)
12500 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12501 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12502 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12503 default: narrow
= FALSE
; break;
12509 inst
.instruction
|= Rn
;
12510 inst
.instruction
|= Rm
<< 3;
12511 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12515 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12516 inst
.instruction
|= Rn
<< r0off
;
12517 encode_thumb32_shifted_operand (1);
12521 switch (inst
.instruction
)
12524 /* In v4t or v5t a move of two lowregs produces unpredictable
12525 results. Don't allow this. */
12528 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12529 "MOV Rd, Rs with two low registers is not "
12530 "permitted on this architecture");
12531 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12535 inst
.instruction
= T_OPCODE_MOV_HR
;
12536 inst
.instruction
|= (Rn
& 0x8) << 4;
12537 inst
.instruction
|= (Rn
& 0x7);
12538 inst
.instruction
|= Rm
<< 3;
12542 /* We know we have low registers at this point.
12543 Generate LSLS Rd, Rs, #0. */
12544 inst
.instruction
= T_OPCODE_LSL_I
;
12545 inst
.instruction
|= Rn
;
12546 inst
.instruction
|= Rm
<< 3;
12552 inst
.instruction
= T_OPCODE_CMP_LR
;
12553 inst
.instruction
|= Rn
;
12554 inst
.instruction
|= Rm
<< 3;
12558 inst
.instruction
= T_OPCODE_CMP_HR
;
12559 inst
.instruction
|= (Rn
& 0x8) << 4;
12560 inst
.instruction
|= (Rn
& 0x7);
12561 inst
.instruction
|= Rm
<< 3;
12568 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12570 /* PR 10443: Do not silently ignore shifted operands. */
12571 constraint (inst
.operands
[1].shifted
,
12572 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12574 if (inst
.operands
[1].isreg
)
12576 if (Rn
< 8 && Rm
< 8)
12578 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12579 since a MOV instruction produces unpredictable results. */
12580 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12581 inst
.instruction
= T_OPCODE_ADD_I3
;
12583 inst
.instruction
= T_OPCODE_CMP_LR
;
12585 inst
.instruction
|= Rn
;
12586 inst
.instruction
|= Rm
<< 3;
12590 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12591 inst
.instruction
= T_OPCODE_MOV_HR
;
12593 inst
.instruction
= T_OPCODE_CMP_HR
;
12599 constraint (Rn
> 7,
12600 _("only lo regs allowed with immediate"));
12601 inst
.instruction
|= Rn
<< 8;
12602 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12613 top
= (inst
.instruction
& 0x00800000) != 0;
12614 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12616 constraint (top
, _(":lower16: not allowed in this instruction"));
12617 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12619 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12621 constraint (!top
, _(":upper16: not allowed in this instruction"));
12622 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12625 Rd
= inst
.operands
[0].reg
;
12626 reject_bad_reg (Rd
);
12628 inst
.instruction
|= Rd
<< 8;
12629 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12631 imm
= inst
.relocs
[0].exp
.X_add_number
;
12632 inst
.instruction
|= (imm
& 0xf000) << 4;
12633 inst
.instruction
|= (imm
& 0x0800) << 15;
12634 inst
.instruction
|= (imm
& 0x0700) << 4;
12635 inst
.instruction
|= (imm
& 0x00ff);
12640 do_t_mvn_tst (void)
12644 Rn
= inst
.operands
[0].reg
;
12645 Rm
= inst
.operands
[1].reg
;
12647 if (inst
.instruction
== T_MNEM_cmp
12648 || inst
.instruction
== T_MNEM_cmn
)
12649 constraint (Rn
== REG_PC
, BAD_PC
);
12651 reject_bad_reg (Rn
);
12652 reject_bad_reg (Rm
);
12654 if (unified_syntax
)
12656 int r0off
= (inst
.instruction
== T_MNEM_mvn
12657 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12658 bfd_boolean narrow
;
12660 if (inst
.size_req
== 4
12661 || inst
.instruction
> 0xffff
12662 || inst
.operands
[1].shifted
12663 || Rn
> 7 || Rm
> 7)
12665 else if (inst
.instruction
== T_MNEM_cmn
12666 || inst
.instruction
== T_MNEM_tst
)
12668 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12669 narrow
= !in_pred_block ();
12671 narrow
= in_pred_block ();
12673 if (!inst
.operands
[1].isreg
)
12675 /* For an immediate, we always generate a 32-bit opcode;
12676 section relaxation will shrink it later if possible. */
12677 if (inst
.instruction
< 0xffff)
12678 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12679 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12680 inst
.instruction
|= Rn
<< r0off
;
12681 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12685 /* See if we can do this with a 16-bit instruction. */
12688 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12689 inst
.instruction
|= Rn
;
12690 inst
.instruction
|= Rm
<< 3;
12694 constraint (inst
.operands
[1].shifted
12695 && inst
.operands
[1].immisreg
,
12696 _("shift must be constant"));
12697 if (inst
.instruction
< 0xffff)
12698 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12699 inst
.instruction
|= Rn
<< r0off
;
12700 encode_thumb32_shifted_operand (1);
12706 constraint (inst
.instruction
> 0xffff
12707 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12708 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12709 _("unshifted register required"));
12710 constraint (Rn
> 7 || Rm
> 7,
12713 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12714 inst
.instruction
|= Rn
;
12715 inst
.instruction
|= Rm
<< 3;
12724 if (do_vfp_nsyn_mrs () == SUCCESS
)
12727 Rd
= inst
.operands
[0].reg
;
12728 reject_bad_reg (Rd
);
12729 inst
.instruction
|= Rd
<< 8;
12731 if (inst
.operands
[1].isreg
)
12733 unsigned br
= inst
.operands
[1].reg
;
12734 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12735 as_bad (_("bad register for mrs"));
12737 inst
.instruction
|= br
& (0xf << 16);
12738 inst
.instruction
|= (br
& 0x300) >> 4;
12739 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12743 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12745 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12747 /* PR gas/12698: The constraint is only applied for m_profile.
12748 If the user has specified -march=all, we want to ignore it as
12749 we are building for any CPU type, including non-m variants. */
12750 bfd_boolean m_profile
=
12751 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12752 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12753 "not support requested special purpose register"));
12756 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12758 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12759 _("'APSR', 'CPSR' or 'SPSR' expected"));
12761 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12762 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12763 inst
.instruction
|= 0xf0000;
12773 if (do_vfp_nsyn_msr () == SUCCESS
)
12776 constraint (!inst
.operands
[1].isreg
,
12777 _("Thumb encoding does not support an immediate here"));
12779 if (inst
.operands
[0].isreg
)
12780 flags
= (int)(inst
.operands
[0].reg
);
12782 flags
= inst
.operands
[0].imm
;
12784 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12786 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12788 /* PR gas/12698: The constraint is only applied for m_profile.
12789 If the user has specified -march=all, we want to ignore it as
12790 we are building for any CPU type, including non-m variants. */
12791 bfd_boolean m_profile
=
12792 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12793 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12794 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12795 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12796 && bits
!= PSR_f
)) && m_profile
,
12797 _("selected processor does not support requested special "
12798 "purpose register"));
12801 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12802 "requested special purpose register"));
12804 Rn
= inst
.operands
[1].reg
;
12805 reject_bad_reg (Rn
);
12807 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12808 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12809 inst
.instruction
|= (flags
& 0x300) >> 4;
12810 inst
.instruction
|= (flags
& 0xff);
12811 inst
.instruction
|= Rn
<< 16;
12817 bfd_boolean narrow
;
12818 unsigned Rd
, Rn
, Rm
;
12820 if (!inst
.operands
[2].present
)
12821 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12823 Rd
= inst
.operands
[0].reg
;
12824 Rn
= inst
.operands
[1].reg
;
12825 Rm
= inst
.operands
[2].reg
;
12827 if (unified_syntax
)
12829 if (inst
.size_req
== 4
12835 else if (inst
.instruction
== T_MNEM_muls
)
12836 narrow
= !in_pred_block ();
12838 narrow
= in_pred_block ();
12842 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12843 constraint (Rn
> 7 || Rm
> 7,
12850 /* 16-bit MULS/Conditional MUL. */
12851 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12852 inst
.instruction
|= Rd
;
12855 inst
.instruction
|= Rm
<< 3;
12857 inst
.instruction
|= Rn
<< 3;
12859 constraint (1, _("dest must overlap one source register"));
12863 constraint (inst
.instruction
!= T_MNEM_mul
,
12864 _("Thumb-2 MUL must not set flags"));
12866 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12867 inst
.instruction
|= Rd
<< 8;
12868 inst
.instruction
|= Rn
<< 16;
12869 inst
.instruction
|= Rm
<< 0;
12871 reject_bad_reg (Rd
);
12872 reject_bad_reg (Rn
);
12873 reject_bad_reg (Rm
);
12880 unsigned RdLo
, RdHi
, Rn
, Rm
;
12882 RdLo
= inst
.operands
[0].reg
;
12883 RdHi
= inst
.operands
[1].reg
;
12884 Rn
= inst
.operands
[2].reg
;
12885 Rm
= inst
.operands
[3].reg
;
12887 reject_bad_reg (RdLo
);
12888 reject_bad_reg (RdHi
);
12889 reject_bad_reg (Rn
);
12890 reject_bad_reg (Rm
);
12892 inst
.instruction
|= RdLo
<< 12;
12893 inst
.instruction
|= RdHi
<< 8;
12894 inst
.instruction
|= Rn
<< 16;
12895 inst
.instruction
|= Rm
;
12898 as_tsktsk (_("rdhi and rdlo must be different"));
12904 set_pred_insn_type (NEUTRAL_IT_INSN
);
12906 if (unified_syntax
)
12908 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12910 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12911 inst
.instruction
|= inst
.operands
[0].imm
;
12915 /* PR9722: Check for Thumb2 availability before
12916 generating a thumb2 nop instruction. */
12917 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12919 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12920 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12923 inst
.instruction
= 0x46c0;
12928 constraint (inst
.operands
[0].present
,
12929 _("Thumb does not support NOP with hints"));
12930 inst
.instruction
= 0x46c0;
12937 if (unified_syntax
)
12939 bfd_boolean narrow
;
12941 if (THUMB_SETS_FLAGS (inst
.instruction
))
12942 narrow
= !in_pred_block ();
12944 narrow
= in_pred_block ();
12945 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12947 if (inst
.size_req
== 4)
12952 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12953 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12954 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12958 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12959 inst
.instruction
|= inst
.operands
[0].reg
;
12960 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12965 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12967 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12969 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12970 inst
.instruction
|= inst
.operands
[0].reg
;
12971 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12980 Rd
= inst
.operands
[0].reg
;
12981 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12983 reject_bad_reg (Rd
);
12984 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12985 reject_bad_reg (Rn
);
12987 inst
.instruction
|= Rd
<< 8;
12988 inst
.instruction
|= Rn
<< 16;
12990 if (!inst
.operands
[2].isreg
)
12992 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12993 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12999 Rm
= inst
.operands
[2].reg
;
13000 reject_bad_reg (Rm
);
13002 constraint (inst
.operands
[2].shifted
13003 && inst
.operands
[2].immisreg
,
13004 _("shift must be constant"));
13005 encode_thumb32_shifted_operand (2);
13012 unsigned Rd
, Rn
, Rm
;
13014 Rd
= inst
.operands
[0].reg
;
13015 Rn
= inst
.operands
[1].reg
;
13016 Rm
= inst
.operands
[2].reg
;
13018 reject_bad_reg (Rd
);
13019 reject_bad_reg (Rn
);
13020 reject_bad_reg (Rm
);
13022 inst
.instruction
|= Rd
<< 8;
13023 inst
.instruction
|= Rn
<< 16;
13024 inst
.instruction
|= Rm
;
13025 if (inst
.operands
[3].present
)
13027 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
13028 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13029 _("expression too complex"));
13030 inst
.instruction
|= (val
& 0x1c) << 10;
13031 inst
.instruction
|= (val
& 0x03) << 6;
13038 if (!inst
.operands
[3].present
)
13042 inst
.instruction
&= ~0x00000020;
13044 /* PR 10168. Swap the Rm and Rn registers. */
13045 Rtmp
= inst
.operands
[1].reg
;
13046 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13047 inst
.operands
[2].reg
= Rtmp
;
13055 if (inst
.operands
[0].immisreg
)
13056 reject_bad_reg (inst
.operands
[0].imm
);
13058 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
13062 do_t_push_pop (void)
13066 constraint (inst
.operands
[0].writeback
,
13067 _("push/pop do not support {reglist}^"));
13068 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13069 _("expression too complex"));
13071 mask
= inst
.operands
[0].imm
;
13072 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13073 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13074 else if (inst
.size_req
!= 4
13075 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13076 ? REG_LR
: REG_PC
)))
13078 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13079 inst
.instruction
|= THUMB_PP_PC_LR
;
13080 inst
.instruction
|= mask
& 0xff;
13082 else if (unified_syntax
)
13084 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13085 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
13089 inst
.error
= _("invalid register list to push/pop instruction");
13097 if (unified_syntax
)
13098 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
13101 inst
.error
= _("invalid register list to push/pop instruction");
13107 do_t_vscclrm (void)
13109 if (inst
.operands
[0].issingle
)
13111 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13112 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13113 inst
.instruction
|= inst
.operands
[0].imm
;
13117 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13118 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13119 inst
.instruction
|= 1 << 8;
13120 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13129 Rd
= inst
.operands
[0].reg
;
13130 Rm
= inst
.operands
[1].reg
;
13132 reject_bad_reg (Rd
);
13133 reject_bad_reg (Rm
);
13135 inst
.instruction
|= Rd
<< 8;
13136 inst
.instruction
|= Rm
<< 16;
13137 inst
.instruction
|= Rm
;
13145 Rd
= inst
.operands
[0].reg
;
13146 Rm
= inst
.operands
[1].reg
;
13148 reject_bad_reg (Rd
);
13149 reject_bad_reg (Rm
);
13151 if (Rd
<= 7 && Rm
<= 7
13152 && inst
.size_req
!= 4)
13154 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13155 inst
.instruction
|= Rd
;
13156 inst
.instruction
|= Rm
<< 3;
13158 else if (unified_syntax
)
13160 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13161 inst
.instruction
|= Rd
<< 8;
13162 inst
.instruction
|= Rm
<< 16;
13163 inst
.instruction
|= Rm
;
13166 inst
.error
= BAD_HIREG
;
13174 Rd
= inst
.operands
[0].reg
;
13175 Rm
= inst
.operands
[1].reg
;
13177 reject_bad_reg (Rd
);
13178 reject_bad_reg (Rm
);
13180 inst
.instruction
|= Rd
<< 8;
13181 inst
.instruction
|= Rm
;
13189 Rd
= inst
.operands
[0].reg
;
13190 Rs
= (inst
.operands
[1].present
13191 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13192 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13194 reject_bad_reg (Rd
);
13195 reject_bad_reg (Rs
);
13196 if (inst
.operands
[2].isreg
)
13197 reject_bad_reg (inst
.operands
[2].reg
);
13199 inst
.instruction
|= Rd
<< 8;
13200 inst
.instruction
|= Rs
<< 16;
13201 if (!inst
.operands
[2].isreg
)
13203 bfd_boolean narrow
;
13205 if ((inst
.instruction
& 0x00100000) != 0)
13206 narrow
= !in_pred_block ();
13208 narrow
= in_pred_block ();
13210 if (Rd
> 7 || Rs
> 7)
13213 if (inst
.size_req
== 4 || !unified_syntax
)
13216 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13217 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13220 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13221 relaxation, but it doesn't seem worth the hassle. */
13224 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13225 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13226 inst
.instruction
|= Rs
<< 3;
13227 inst
.instruction
|= Rd
;
13231 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13232 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13236 encode_thumb32_shifted_operand (2);
13242 if (warn_on_deprecated
13243 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13244 as_tsktsk (_("setend use is deprecated for ARMv8"));
13246 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13247 if (inst
.operands
[0].imm
)
13248 inst
.instruction
|= 0x8;
13254 if (!inst
.operands
[1].present
)
13255 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13257 if (unified_syntax
)
13259 bfd_boolean narrow
;
13262 switch (inst
.instruction
)
13265 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13267 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13269 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13271 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13275 if (THUMB_SETS_FLAGS (inst
.instruction
))
13276 narrow
= !in_pred_block ();
13278 narrow
= in_pred_block ();
13279 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13281 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13283 if (inst
.operands
[2].isreg
13284 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13285 || inst
.operands
[2].reg
> 7))
13287 if (inst
.size_req
== 4)
13290 reject_bad_reg (inst
.operands
[0].reg
);
13291 reject_bad_reg (inst
.operands
[1].reg
);
13295 if (inst
.operands
[2].isreg
)
13297 reject_bad_reg (inst
.operands
[2].reg
);
13298 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13299 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13300 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13301 inst
.instruction
|= inst
.operands
[2].reg
;
13303 /* PR 12854: Error on extraneous shifts. */
13304 constraint (inst
.operands
[2].shifted
,
13305 _("extraneous shift as part of operand to shift insn"));
13309 inst
.operands
[1].shifted
= 1;
13310 inst
.operands
[1].shift_kind
= shift_kind
;
13311 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13312 ? T_MNEM_movs
: T_MNEM_mov
);
13313 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13314 encode_thumb32_shifted_operand (1);
13315 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13316 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13321 if (inst
.operands
[2].isreg
)
13323 switch (shift_kind
)
13325 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13326 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13327 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13328 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13332 inst
.instruction
|= inst
.operands
[0].reg
;
13333 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13335 /* PR 12854: Error on extraneous shifts. */
13336 constraint (inst
.operands
[2].shifted
,
13337 _("extraneous shift as part of operand to shift insn"));
13341 switch (shift_kind
)
13343 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13344 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13345 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13348 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13349 inst
.instruction
|= inst
.operands
[0].reg
;
13350 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13356 constraint (inst
.operands
[0].reg
> 7
13357 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13358 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13360 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13362 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13363 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13364 _("source1 and dest must be same register"));
13366 switch (inst
.instruction
)
13368 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13369 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13370 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13371 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13375 inst
.instruction
|= inst
.operands
[0].reg
;
13376 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13378 /* PR 12854: Error on extraneous shifts. */
13379 constraint (inst
.operands
[2].shifted
,
13380 _("extraneous shift as part of operand to shift insn"));
13384 switch (inst
.instruction
)
13386 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13387 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13388 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13389 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13392 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13393 inst
.instruction
|= inst
.operands
[0].reg
;
13394 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13402 unsigned Rd
, Rn
, Rm
;
13404 Rd
= inst
.operands
[0].reg
;
13405 Rn
= inst
.operands
[1].reg
;
13406 Rm
= inst
.operands
[2].reg
;
13408 reject_bad_reg (Rd
);
13409 reject_bad_reg (Rn
);
13410 reject_bad_reg (Rm
);
13412 inst
.instruction
|= Rd
<< 8;
13413 inst
.instruction
|= Rn
<< 16;
13414 inst
.instruction
|= Rm
;
13420 unsigned Rd
, Rn
, Rm
;
13422 Rd
= inst
.operands
[0].reg
;
13423 Rm
= inst
.operands
[1].reg
;
13424 Rn
= inst
.operands
[2].reg
;
13426 reject_bad_reg (Rd
);
13427 reject_bad_reg (Rn
);
13428 reject_bad_reg (Rm
);
13430 inst
.instruction
|= Rd
<< 8;
13431 inst
.instruction
|= Rn
<< 16;
13432 inst
.instruction
|= Rm
;
13438 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13439 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13440 _("SMC is not permitted on this architecture"));
13441 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13442 _("expression too complex"));
13443 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13444 inst
.instruction
|= (value
& 0xf000) >> 12;
13445 inst
.instruction
|= (value
& 0x0ff0);
13446 inst
.instruction
|= (value
& 0x000f) << 16;
13447 /* PR gas/15623: SMC instructions must be last in an IT block. */
13448 set_pred_insn_type_last ();
13454 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13456 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13457 inst
.instruction
|= (value
& 0x0fff);
13458 inst
.instruction
|= (value
& 0xf000) << 4;
13462 do_t_ssat_usat (int bias
)
13466 Rd
= inst
.operands
[0].reg
;
13467 Rn
= inst
.operands
[2].reg
;
13469 reject_bad_reg (Rd
);
13470 reject_bad_reg (Rn
);
13472 inst
.instruction
|= Rd
<< 8;
13473 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13474 inst
.instruction
|= Rn
<< 16;
13476 if (inst
.operands
[3].present
)
13478 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13480 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13482 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13483 _("expression too complex"));
13485 if (shift_amount
!= 0)
13487 constraint (shift_amount
> 31,
13488 _("shift expression is too large"));
13490 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13491 inst
.instruction
|= 0x00200000; /* sh bit. */
13493 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13494 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13502 do_t_ssat_usat (1);
13510 Rd
= inst
.operands
[0].reg
;
13511 Rn
= inst
.operands
[2].reg
;
13513 reject_bad_reg (Rd
);
13514 reject_bad_reg (Rn
);
13516 inst
.instruction
|= Rd
<< 8;
13517 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13518 inst
.instruction
|= Rn
<< 16;
13524 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13525 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13526 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13527 || inst
.operands
[2].negative
,
13530 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13532 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13533 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13534 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13535 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13541 if (!inst
.operands
[2].present
)
13542 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13544 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13545 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13546 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13549 inst
.instruction
|= inst
.operands
[0].reg
;
13550 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13551 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13552 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13558 unsigned Rd
, Rn
, Rm
;
13560 Rd
= inst
.operands
[0].reg
;
13561 Rn
= inst
.operands
[1].reg
;
13562 Rm
= inst
.operands
[2].reg
;
13564 reject_bad_reg (Rd
);
13565 reject_bad_reg (Rn
);
13566 reject_bad_reg (Rm
);
13568 inst
.instruction
|= Rd
<< 8;
13569 inst
.instruction
|= Rn
<< 16;
13570 inst
.instruction
|= Rm
;
13571 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13579 Rd
= inst
.operands
[0].reg
;
13580 Rm
= inst
.operands
[1].reg
;
13582 reject_bad_reg (Rd
);
13583 reject_bad_reg (Rm
);
13585 if (inst
.instruction
<= 0xffff
13586 && inst
.size_req
!= 4
13587 && Rd
<= 7 && Rm
<= 7
13588 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13590 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13591 inst
.instruction
|= Rd
;
13592 inst
.instruction
|= Rm
<< 3;
13594 else if (unified_syntax
)
13596 if (inst
.instruction
<= 0xffff)
13597 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13598 inst
.instruction
|= Rd
<< 8;
13599 inst
.instruction
|= Rm
;
13600 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13604 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13605 _("Thumb encoding does not support rotation"));
13606 constraint (1, BAD_HIREG
);
13613 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13622 half
= (inst
.instruction
& 0x10) != 0;
13623 set_pred_insn_type_last ();
13624 constraint (inst
.operands
[0].immisreg
,
13625 _("instruction requires register index"));
13627 Rn
= inst
.operands
[0].reg
;
13628 Rm
= inst
.operands
[0].imm
;
13630 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13631 constraint (Rn
== REG_SP
, BAD_SP
);
13632 reject_bad_reg (Rm
);
13634 constraint (!half
&& inst
.operands
[0].shifted
,
13635 _("instruction does not allow shifted index"));
13636 inst
.instruction
|= (Rn
<< 16) | Rm
;
13642 if (!inst
.operands
[0].present
)
13643 inst
.operands
[0].imm
= 0;
13645 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13647 constraint (inst
.size_req
== 2,
13648 _("immediate value out of range"));
13649 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13650 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13651 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13655 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13656 inst
.instruction
|= inst
.operands
[0].imm
;
13659 set_pred_insn_type (NEUTRAL_IT_INSN
);
13666 do_t_ssat_usat (0);
13674 Rd
= inst
.operands
[0].reg
;
13675 Rn
= inst
.operands
[2].reg
;
13677 reject_bad_reg (Rd
);
13678 reject_bad_reg (Rn
);
13680 inst
.instruction
|= Rd
<< 8;
13681 inst
.instruction
|= inst
.operands
[1].imm
;
13682 inst
.instruction
|= Rn
<< 16;
13685 /* Checking the range of the branch offset (VAL) with NBITS bits
13686 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13688 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13690 gas_assert (nbits
> 0 && nbits
<= 32);
13693 int cmp
= (1 << (nbits
- 1));
13694 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13699 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13705 /* For branches in Armv8.1-M Mainline. */
13707 do_t_branch_future (void)
13709 unsigned long insn
= inst
.instruction
;
13711 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13712 if (inst
.operands
[0].hasreloc
== 0)
13714 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13715 as_bad (BAD_BRANCH_OFF
);
13717 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13721 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13722 inst
.relocs
[0].pc_rel
= 1;
13728 if (inst
.operands
[1].hasreloc
== 0)
13730 int val
= inst
.operands
[1].imm
;
13731 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13732 as_bad (BAD_BRANCH_OFF
);
13734 int immA
= (val
& 0x0001f000) >> 12;
13735 int immB
= (val
& 0x00000ffc) >> 2;
13736 int immC
= (val
& 0x00000002) >> 1;
13737 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13741 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13742 inst
.relocs
[1].pc_rel
= 1;
13747 if (inst
.operands
[1].hasreloc
== 0)
13749 int val
= inst
.operands
[1].imm
;
13750 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13751 as_bad (BAD_BRANCH_OFF
);
13753 int immA
= (val
& 0x0007f000) >> 12;
13754 int immB
= (val
& 0x00000ffc) >> 2;
13755 int immC
= (val
& 0x00000002) >> 1;
13756 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13760 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13761 inst
.relocs
[1].pc_rel
= 1;
13765 case T_MNEM_bfcsel
:
13767 if (inst
.operands
[1].hasreloc
== 0)
13769 int val
= inst
.operands
[1].imm
;
13770 int immA
= (val
& 0x00001000) >> 12;
13771 int immB
= (val
& 0x00000ffc) >> 2;
13772 int immC
= (val
& 0x00000002) >> 1;
13773 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13777 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13778 inst
.relocs
[1].pc_rel
= 1;
13782 if (inst
.operands
[2].hasreloc
== 0)
13784 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13785 int val2
= inst
.operands
[2].imm
;
13786 int val0
= inst
.operands
[0].imm
& 0x1f;
13787 int diff
= val2
- val0
;
13789 inst
.instruction
|= 1 << 17; /* T bit. */
13790 else if (diff
!= 2)
13791 as_bad (_("out of range label-relative fixup value"));
13795 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13796 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13797 inst
.relocs
[2].pc_rel
= 1;
13801 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13802 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13807 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13814 /* Helper function for do_t_loloop to handle relocations. */
13816 v8_1_loop_reloc (int is_le
)
13818 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13820 int value
= inst
.relocs
[0].exp
.X_add_number
;
13821 value
= (is_le
) ? -value
: value
;
13823 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13824 as_bad (BAD_BRANCH_OFF
);
13828 immh
= (value
& 0x00000ffc) >> 2;
13829 imml
= (value
& 0x00000002) >> 1;
13831 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13835 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13836 inst
.relocs
[0].pc_rel
= 1;
13840 /* To handle the Scalar Low Overhead Loop instructions
13841 in Armv8.1-M Mainline. */
13845 unsigned long insn
= inst
.instruction
;
13847 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13848 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13854 if (!inst
.operands
[0].present
)
13855 inst
.instruction
|= 1 << 21;
13857 v8_1_loop_reloc (TRUE
);
13861 v8_1_loop_reloc (FALSE
);
13862 /* Fall through. */
13864 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13865 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13872 /* MVE instruction encoder helpers. */
13873 #define M_MNEM_vabav 0xee800f01
13874 #define M_MNEM_vmladav 0xeef00e00
13875 #define M_MNEM_vmladava 0xeef00e20
13876 #define M_MNEM_vmladavx 0xeef01e00
13877 #define M_MNEM_vmladavax 0xeef01e20
13878 #define M_MNEM_vmlsdav 0xeef00e01
13879 #define M_MNEM_vmlsdava 0xeef00e21
13880 #define M_MNEM_vmlsdavx 0xeef01e01
13881 #define M_MNEM_vmlsdavax 0xeef01e21
13882 #define M_MNEM_vmullt 0xee011e00
13883 #define M_MNEM_vmullb 0xee010e00
13884 #define M_MNEM_vst20 0xfc801e00
13885 #define M_MNEM_vst21 0xfc801e20
13886 #define M_MNEM_vst40 0xfc801e01
13887 #define M_MNEM_vst41 0xfc801e21
13888 #define M_MNEM_vst42 0xfc801e41
13889 #define M_MNEM_vst43 0xfc801e61
13890 #define M_MNEM_vld20 0xfc901e00
13891 #define M_MNEM_vld21 0xfc901e20
13892 #define M_MNEM_vld40 0xfc901e01
13893 #define M_MNEM_vld41 0xfc901e21
13894 #define M_MNEM_vld42 0xfc901e41
13895 #define M_MNEM_vld43 0xfc901e61
13897 /* Neon instruction encoder helpers. */
13899 /* Encodings for the different types for various Neon opcodes. */
13901 /* An "invalid" code for the following tables. */
13904 struct neon_tab_entry
13907 unsigned float_or_poly
;
13908 unsigned scalar_or_imm
;
13911 /* Map overloaded Neon opcodes to their respective encodings. */
13912 #define NEON_ENC_TAB \
13913 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13914 X(vabdl, 0x0800700, N_INV, N_INV), \
13915 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13916 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13917 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13918 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13919 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13920 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13921 X(vaddl, 0x0800000, N_INV, N_INV), \
13922 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13923 X(vsubl, 0x0800200, N_INV, N_INV), \
13924 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13925 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13926 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13927 /* Register variants of the following two instructions are encoded as
13928 vcge / vcgt with the operands reversed. */ \
13929 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13930 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13931 X(vfma, N_INV, 0x0000c10, N_INV), \
13932 X(vfms, N_INV, 0x0200c10, N_INV), \
13933 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13934 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13935 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13936 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13937 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13938 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13939 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13940 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13941 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13942 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13943 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13944 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13945 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13946 X(vshl, 0x0000400, N_INV, 0x0800510), \
13947 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13948 X(vand, 0x0000110, N_INV, 0x0800030), \
13949 X(vbic, 0x0100110, N_INV, 0x0800030), \
13950 X(veor, 0x1000110, N_INV, N_INV), \
13951 X(vorn, 0x0300110, N_INV, 0x0800010), \
13952 X(vorr, 0x0200110, N_INV, 0x0800010), \
13953 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13954 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13955 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13956 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13957 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13958 X(vst1, 0x0000000, 0x0800000, N_INV), \
13959 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13960 X(vst2, 0x0000100, 0x0800100, N_INV), \
13961 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13962 X(vst3, 0x0000200, 0x0800200, N_INV), \
13963 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13964 X(vst4, 0x0000300, 0x0800300, N_INV), \
13965 X(vmovn, 0x1b20200, N_INV, N_INV), \
13966 X(vtrn, 0x1b20080, N_INV, N_INV), \
13967 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13968 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13969 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13970 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13971 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13972 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13973 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13974 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13975 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13976 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13977 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13978 X(vseleq, 0xe000a00, N_INV, N_INV), \
13979 X(vselvs, 0xe100a00, N_INV, N_INV), \
13980 X(vselge, 0xe200a00, N_INV, N_INV), \
13981 X(vselgt, 0xe300a00, N_INV, N_INV), \
13982 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13983 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13984 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13985 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13986 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13987 X(aes, 0x3b00300, N_INV, N_INV), \
13988 X(sha3op, 0x2000c00, N_INV, N_INV), \
13989 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13990 X(sha2op, 0x3ba0380, N_INV, N_INV)
13994 #define X(OPC,I,F,S) N_MNEM_##OPC
13999 static const struct neon_tab_entry neon_enc_tab
[] =
14001 #define X(OPC,I,F,S) { (I), (F), (S) }
14006 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
14007 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14008 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14009 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14010 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14011 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14012 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14013 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14014 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14015 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14016 #define NEON_ENC_SINGLE_(X) \
14017 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
14018 #define NEON_ENC_DOUBLE_(X) \
14019 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
14020 #define NEON_ENC_FPV8_(X) \
14021 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14023 #define NEON_ENCODE(type, inst) \
14026 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
14027 inst.is_neon = 1; \
14031 #define check_neon_suffixes \
14034 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
14036 as_bad (_("invalid neon suffix for non neon instruction")); \
14042 /* Define shapes for instruction operands. The following mnemonic characters
14043 are used in this table:
14045 F - VFP S<n> register
14046 D - Neon D<n> register
14047 Q - Neon Q<n> register
14051 L - D<n> register list
14053 This table is used to generate various data:
14054 - enumerations of the form NS_DDR to be used as arguments to
14056 - a table classifying shapes into single, double, quad, mixed.
14057 - a table used to drive neon_select_shape. */
14059 #define NEON_SHAPE_DEF \
14060 X(3, (R, Q, Q), QUAD), \
14061 X(3, (D, D, D), DOUBLE), \
14062 X(3, (Q, Q, Q), QUAD), \
14063 X(3, (D, D, I), DOUBLE), \
14064 X(3, (Q, Q, I), QUAD), \
14065 X(3, (D, D, S), DOUBLE), \
14066 X(3, (Q, Q, S), QUAD), \
14067 X(3, (Q, Q, R), QUAD), \
14068 X(2, (D, D), DOUBLE), \
14069 X(2, (Q, Q), QUAD), \
14070 X(2, (D, S), DOUBLE), \
14071 X(2, (Q, S), QUAD), \
14072 X(2, (D, R), DOUBLE), \
14073 X(2, (Q, R), QUAD), \
14074 X(2, (D, I), DOUBLE), \
14075 X(2, (Q, I), QUAD), \
14076 X(3, (D, L, D), DOUBLE), \
14077 X(2, (D, Q), MIXED), \
14078 X(2, (Q, D), MIXED), \
14079 X(3, (D, Q, I), MIXED), \
14080 X(3, (Q, D, I), MIXED), \
14081 X(3, (Q, D, D), MIXED), \
14082 X(3, (D, Q, Q), MIXED), \
14083 X(3, (Q, Q, D), MIXED), \
14084 X(3, (Q, D, S), MIXED), \
14085 X(3, (D, Q, S), MIXED), \
14086 X(4, (D, D, D, I), DOUBLE), \
14087 X(4, (Q, Q, Q, I), QUAD), \
14088 X(4, (D, D, S, I), DOUBLE), \
14089 X(4, (Q, Q, S, I), QUAD), \
14090 X(2, (F, F), SINGLE), \
14091 X(3, (F, F, F), SINGLE), \
14092 X(2, (F, I), SINGLE), \
14093 X(2, (F, D), MIXED), \
14094 X(2, (D, F), MIXED), \
14095 X(3, (F, F, I), MIXED), \
14096 X(4, (R, R, F, F), SINGLE), \
14097 X(4, (F, F, R, R), SINGLE), \
14098 X(3, (D, R, R), DOUBLE), \
14099 X(3, (R, R, D), DOUBLE), \
14100 X(2, (S, R), SINGLE), \
14101 X(2, (R, S), SINGLE), \
14102 X(2, (F, R), SINGLE), \
14103 X(2, (R, F), SINGLE), \
14104 /* Half float shape supported so far. */\
14105 X (2, (H, D), MIXED), \
14106 X (2, (D, H), MIXED), \
14107 X (2, (H, F), MIXED), \
14108 X (2, (F, H), MIXED), \
14109 X (2, (H, H), HALF), \
14110 X (2, (H, R), HALF), \
14111 X (2, (R, H), HALF), \
14112 X (2, (H, I), HALF), \
14113 X (3, (H, H, H), HALF), \
14114 X (3, (H, F, I), MIXED), \
14115 X (3, (F, H, I), MIXED), \
14116 X (3, (D, H, H), MIXED), \
14117 X (3, (D, H, S), MIXED)
14119 #define S2(A,B) NS_##A##B
14120 #define S3(A,B,C) NS_##A##B##C
14121 #define S4(A,B,C,D) NS_##A##B##C##D
14123 #define X(N, L, C) S##N L
14136 enum neon_shape_class
14145 #define X(N, L, C) SC_##C
14147 static enum neon_shape_class neon_shape_class
[] =
14166 /* Register widths of above. */
14167 static unsigned neon_shape_el_size
[] =
14179 struct neon_shape_info
14182 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14185 #define S2(A,B) { SE_##A, SE_##B }
14186 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14187 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14189 #define X(N, L, C) { N, S##N L }
14191 static struct neon_shape_info neon_shape_tab
[] =
14201 /* Bit masks used in type checking given instructions.
14202 'N_EQK' means the type must be the same as (or based on in some way) the key
14203 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14204 set, various other bits can be set as well in order to modify the meaning of
14205 the type constraint. */
14207 enum neon_type_mask
14231 N_KEY
= 0x1000000, /* Key element (main type specifier). */
14232 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
14233 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
14234 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
14235 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
14236 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
14237 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14238 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14239 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14240 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
14241 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14243 N_MAX_NONSPECIAL
= N_P64
14246 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14248 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14249 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14250 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14251 #define N_S_32 (N_S8 | N_S16 | N_S32)
14252 #define N_F_16_32 (N_F16 | N_F32)
14253 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14254 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14255 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14256 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14257 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14258 #define N_F_MVE (N_F16 | N_F32)
14259 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14261 /* Pass this as the first type argument to neon_check_type to ignore types
14263 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14265 /* Select a "shape" for the current instruction (describing register types or
14266 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14267 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14268 function of operand parsing, so this function doesn't need to be called.
14269 Shapes should be listed in order of decreasing length. */
14271 static enum neon_shape
14272 neon_select_shape (enum neon_shape shape
, ...)
14275 enum neon_shape first_shape
= shape
;
14277 /* Fix missing optional operands. FIXME: we don't know at this point how
14278 many arguments we should have, so this makes the assumption that we have
14279 > 1. This is true of all current Neon opcodes, I think, but may not be
14280 true in the future. */
14281 if (!inst
.operands
[1].present
)
14282 inst
.operands
[1] = inst
.operands
[0];
14284 va_start (ap
, shape
);
14286 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14291 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14293 if (!inst
.operands
[j
].present
)
14299 switch (neon_shape_tab
[shape
].el
[j
])
14301 /* If a .f16, .16, .u16, .s16 type specifier is given over
14302 a VFP single precision register operand, it's essentially
14303 means only half of the register is used.
14305 If the type specifier is given after the mnemonics, the
14306 information is stored in inst.vectype. If the type specifier
14307 is given after register operand, the information is stored
14308 in inst.operands[].vectype.
14310 When there is only one type specifier, and all the register
14311 operands are the same type of hardware register, the type
14312 specifier applies to all register operands.
14314 If no type specifier is given, the shape is inferred from
14315 operand information.
14318 vadd.f16 s0, s1, s2: NS_HHH
14319 vabs.f16 s0, s1: NS_HH
14320 vmov.f16 s0, r1: NS_HR
14321 vmov.f16 r0, s1: NS_RH
14322 vcvt.f16 r0, s1: NS_RH
14323 vcvt.f16.s32 s2, s2, #29: NS_HFI
14324 vcvt.f16.s32 s2, s2: NS_HF
14327 if (!(inst
.operands
[j
].isreg
14328 && inst
.operands
[j
].isvec
14329 && inst
.operands
[j
].issingle
14330 && !inst
.operands
[j
].isquad
14331 && ((inst
.vectype
.elems
== 1
14332 && inst
.vectype
.el
[0].size
== 16)
14333 || (inst
.vectype
.elems
> 1
14334 && inst
.vectype
.el
[j
].size
== 16)
14335 || (inst
.vectype
.elems
== 0
14336 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14337 && inst
.operands
[j
].vectype
.size
== 16))))
14342 if (!(inst
.operands
[j
].isreg
14343 && inst
.operands
[j
].isvec
14344 && inst
.operands
[j
].issingle
14345 && !inst
.operands
[j
].isquad
14346 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14347 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14348 || (inst
.vectype
.elems
== 0
14349 && (inst
.operands
[j
].vectype
.size
== 32
14350 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14355 if (!(inst
.operands
[j
].isreg
14356 && inst
.operands
[j
].isvec
14357 && !inst
.operands
[j
].isquad
14358 && !inst
.operands
[j
].issingle
))
14363 if (!(inst
.operands
[j
].isreg
14364 && !inst
.operands
[j
].isvec
))
14369 if (!(inst
.operands
[j
].isreg
14370 && inst
.operands
[j
].isvec
14371 && inst
.operands
[j
].isquad
14372 && !inst
.operands
[j
].issingle
))
14377 if (!(!inst
.operands
[j
].isreg
14378 && !inst
.operands
[j
].isscalar
))
14383 if (!(!inst
.operands
[j
].isreg
14384 && inst
.operands
[j
].isscalar
))
14394 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14395 /* We've matched all the entries in the shape table, and we don't
14396 have any left over operands which have not been matched. */
14402 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14403 first_error (_("invalid instruction shape"));
14408 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14409 means the Q bit should be set). */
14412 neon_quad (enum neon_shape shape
)
14414 return neon_shape_class
[shape
] == SC_QUAD
;
14418 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14421 /* Allow modification to be made to types which are constrained to be
14422 based on the key element, based on bits set alongside N_EQK. */
14423 if ((typebits
& N_EQK
) != 0)
14425 if ((typebits
& N_HLF
) != 0)
14427 else if ((typebits
& N_DBL
) != 0)
14429 if ((typebits
& N_SGN
) != 0)
14430 *g_type
= NT_signed
;
14431 else if ((typebits
& N_UNS
) != 0)
14432 *g_type
= NT_unsigned
;
14433 else if ((typebits
& N_INT
) != 0)
14434 *g_type
= NT_integer
;
14435 else if ((typebits
& N_FLT
) != 0)
14436 *g_type
= NT_float
;
14437 else if ((typebits
& N_SIZ
) != 0)
14438 *g_type
= NT_untyped
;
14442 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14443 operand type, i.e. the single type specified in a Neon instruction when it
14444 is the only one given. */
14446 static struct neon_type_el
14447 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14449 struct neon_type_el dest
= *key
;
14451 gas_assert ((thisarg
& N_EQK
) != 0);
14453 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14458 /* Convert Neon type and size into compact bitmask representation. */
14460 static enum neon_type_mask
14461 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14468 case 8: return N_8
;
14469 case 16: return N_16
;
14470 case 32: return N_32
;
14471 case 64: return N_64
;
14479 case 8: return N_I8
;
14480 case 16: return N_I16
;
14481 case 32: return N_I32
;
14482 case 64: return N_I64
;
14490 case 16: return N_F16
;
14491 case 32: return N_F32
;
14492 case 64: return N_F64
;
14500 case 8: return N_P8
;
14501 case 16: return N_P16
;
14502 case 64: return N_P64
;
14510 case 8: return N_S8
;
14511 case 16: return N_S16
;
14512 case 32: return N_S32
;
14513 case 64: return N_S64
;
14521 case 8: return N_U8
;
14522 case 16: return N_U16
;
14523 case 32: return N_U32
;
14524 case 64: return N_U64
;
14535 /* Convert compact Neon bitmask type representation to a type and size. Only
14536 handles the case where a single bit is set in the mask. */
14539 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14540 enum neon_type_mask mask
)
14542 if ((mask
& N_EQK
) != 0)
14545 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14547 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14549 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14551 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14556 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14558 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14559 *type
= NT_unsigned
;
14560 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14561 *type
= NT_integer
;
14562 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14563 *type
= NT_untyped
;
14564 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14566 else if ((mask
& (N_F_ALL
)) != 0)
14574 /* Modify a bitmask of allowed types. This is only needed for type
14578 modify_types_allowed (unsigned allowed
, unsigned mods
)
14581 enum neon_el_type type
;
14587 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14589 if (el_type_of_type_chk (&type
, &size
,
14590 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14592 neon_modify_type_size (mods
, &type
, &size
);
14593 destmask
|= type_chk_of_el_type (type
, size
);
14600 /* Check type and return type classification.
14601 The manual states (paraphrase): If one datatype is given, it indicates the
14603 - the second operand, if there is one
14604 - the operand, if there is no second operand
14605 - the result, if there are no operands.
14606 This isn't quite good enough though, so we use a concept of a "key" datatype
14607 which is set on a per-instruction basis, which is the one which matters when
14608 only one data type is written.
14609 Note: this function has side-effects (e.g. filling in missing operands). All
14610 Neon instructions should call it before performing bit encoding. */
14612 static struct neon_type_el
14613 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14616 unsigned i
, pass
, key_el
= 0;
14617 unsigned types
[NEON_MAX_TYPE_ELS
];
14618 enum neon_el_type k_type
= NT_invtype
;
14619 unsigned k_size
= -1u;
14620 struct neon_type_el badtype
= {NT_invtype
, -1};
14621 unsigned key_allowed
= 0;
14623 /* Optional registers in Neon instructions are always (not) in operand 1.
14624 Fill in the missing operand here, if it was omitted. */
14625 if (els
> 1 && !inst
.operands
[1].present
)
14626 inst
.operands
[1] = inst
.operands
[0];
14628 /* Suck up all the varargs. */
14630 for (i
= 0; i
< els
; i
++)
14632 unsigned thisarg
= va_arg (ap
, unsigned);
14633 if (thisarg
== N_IGNORE_TYPE
)
14638 types
[i
] = thisarg
;
14639 if ((thisarg
& N_KEY
) != 0)
14644 if (inst
.vectype
.elems
> 0)
14645 for (i
= 0; i
< els
; i
++)
14646 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14648 first_error (_("types specified in both the mnemonic and operands"));
14652 /* Duplicate inst.vectype elements here as necessary.
14653 FIXME: No idea if this is exactly the same as the ARM assembler,
14654 particularly when an insn takes one register and one non-register
14656 if (inst
.vectype
.elems
== 1 && els
> 1)
14659 inst
.vectype
.elems
= els
;
14660 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14661 for (j
= 0; j
< els
; j
++)
14663 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14666 else if (inst
.vectype
.elems
== 0 && els
> 0)
14669 /* No types were given after the mnemonic, so look for types specified
14670 after each operand. We allow some flexibility here; as long as the
14671 "key" operand has a type, we can infer the others. */
14672 for (j
= 0; j
< els
; j
++)
14673 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14674 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14676 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14678 for (j
= 0; j
< els
; j
++)
14679 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14680 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14685 first_error (_("operand types can't be inferred"));
14689 else if (inst
.vectype
.elems
!= els
)
14691 first_error (_("type specifier has the wrong number of parts"));
14695 for (pass
= 0; pass
< 2; pass
++)
14697 for (i
= 0; i
< els
; i
++)
14699 unsigned thisarg
= types
[i
];
14700 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14701 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14702 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14703 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14705 /* Decay more-specific signed & unsigned types to sign-insensitive
14706 integer types if sign-specific variants are unavailable. */
14707 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14708 && (types_allowed
& N_SU_ALL
) == 0)
14709 g_type
= NT_integer
;
14711 /* If only untyped args are allowed, decay any more specific types to
14712 them. Some instructions only care about signs for some element
14713 sizes, so handle that properly. */
14714 if (((types_allowed
& N_UNT
) == 0)
14715 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14716 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14717 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14718 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14719 g_type
= NT_untyped
;
14723 if ((thisarg
& N_KEY
) != 0)
14727 key_allowed
= thisarg
& ~N_KEY
;
14729 /* Check architecture constraint on FP16 extension. */
14731 && k_type
== NT_float
14732 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14734 inst
.error
= _(BAD_FP16
);
14741 if ((thisarg
& N_VFP
) != 0)
14743 enum neon_shape_el regshape
;
14744 unsigned regwidth
, match
;
14746 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14749 first_error (_("invalid instruction shape"));
14752 regshape
= neon_shape_tab
[ns
].el
[i
];
14753 regwidth
= neon_shape_el_size
[regshape
];
14755 /* In VFP mode, operands must match register widths. If we
14756 have a key operand, use its width, else use the width of
14757 the current operand. */
14763 /* FP16 will use a single precision register. */
14764 if (regwidth
== 32 && match
== 16)
14766 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14770 inst
.error
= _(BAD_FP16
);
14775 if (regwidth
!= match
)
14777 first_error (_("operand size must match register width"));
14782 if ((thisarg
& N_EQK
) == 0)
14784 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14786 if ((given_type
& types_allowed
) == 0)
14788 first_error (BAD_SIMD_TYPE
);
14794 enum neon_el_type mod_k_type
= k_type
;
14795 unsigned mod_k_size
= k_size
;
14796 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14797 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14799 first_error (_("inconsistent types in Neon instruction"));
14807 return inst
.vectype
.el
[key_el
];
14810 /* Neon-style VFP instruction forwarding. */
14812 /* Thumb VFP instructions have 0xE in the condition field. */
14815 do_vfp_cond_or_thumb (void)
14820 inst
.instruction
|= 0xe0000000;
14822 inst
.instruction
|= inst
.cond
<< 28;
14825 /* Look up and encode a simple mnemonic, for use as a helper function for the
14826 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14827 etc. It is assumed that operand parsing has already been done, and that the
14828 operands are in the form expected by the given opcode (this isn't necessarily
14829 the same as the form in which they were parsed, hence some massaging must
14830 take place before this function is called).
14831 Checks current arch version against that in the looked-up opcode. */
14834 do_vfp_nsyn_opcode (const char *opname
)
14836 const struct asm_opcode
*opcode
;
14838 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14843 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14844 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14851 inst
.instruction
= opcode
->tvalue
;
14852 opcode
->tencode ();
14856 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14857 opcode
->aencode ();
14862 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14864 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14866 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14869 do_vfp_nsyn_opcode ("fadds");
14871 do_vfp_nsyn_opcode ("fsubs");
14873 /* ARMv8.2 fp16 instruction. */
14875 do_scalar_fp16_v82_encode ();
14880 do_vfp_nsyn_opcode ("faddd");
14882 do_vfp_nsyn_opcode ("fsubd");
14886 /* Check operand types to see if this is a VFP instruction, and if so call
14890 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14892 enum neon_shape rs
;
14893 struct neon_type_el et
;
14898 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14899 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14903 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14904 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14905 N_F_ALL
| N_KEY
| N_VFP
);
14912 if (et
.type
!= NT_invtype
)
14923 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14925 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14927 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14930 do_vfp_nsyn_opcode ("fmacs");
14932 do_vfp_nsyn_opcode ("fnmacs");
14934 /* ARMv8.2 fp16 instruction. */
14936 do_scalar_fp16_v82_encode ();
14941 do_vfp_nsyn_opcode ("fmacd");
14943 do_vfp_nsyn_opcode ("fnmacd");
14948 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14950 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14952 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14955 do_vfp_nsyn_opcode ("ffmas");
14957 do_vfp_nsyn_opcode ("ffnmas");
14959 /* ARMv8.2 fp16 instruction. */
14961 do_scalar_fp16_v82_encode ();
14966 do_vfp_nsyn_opcode ("ffmad");
14968 do_vfp_nsyn_opcode ("ffnmad");
14973 do_vfp_nsyn_mul (enum neon_shape rs
)
14975 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14977 do_vfp_nsyn_opcode ("fmuls");
14979 /* ARMv8.2 fp16 instruction. */
14981 do_scalar_fp16_v82_encode ();
14984 do_vfp_nsyn_opcode ("fmuld");
14988 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14990 int is_neg
= (inst
.instruction
& 0x80) != 0;
14991 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14993 if (rs
== NS_FF
|| rs
== NS_HH
)
14996 do_vfp_nsyn_opcode ("fnegs");
14998 do_vfp_nsyn_opcode ("fabss");
15000 /* ARMv8.2 fp16 instruction. */
15002 do_scalar_fp16_v82_encode ();
15007 do_vfp_nsyn_opcode ("fnegd");
15009 do_vfp_nsyn_opcode ("fabsd");
15013 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15014 insns belong to Neon, and are handled elsewhere. */
15017 do_vfp_nsyn_ldm_stm (int is_dbmode
)
15019 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
15023 do_vfp_nsyn_opcode ("fldmdbs");
15025 do_vfp_nsyn_opcode ("fldmias");
15030 do_vfp_nsyn_opcode ("fstmdbs");
15032 do_vfp_nsyn_opcode ("fstmias");
15037 do_vfp_nsyn_sqrt (void)
15039 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15040 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15042 if (rs
== NS_FF
|| rs
== NS_HH
)
15044 do_vfp_nsyn_opcode ("fsqrts");
15046 /* ARMv8.2 fp16 instruction. */
15048 do_scalar_fp16_v82_encode ();
15051 do_vfp_nsyn_opcode ("fsqrtd");
15055 do_vfp_nsyn_div (void)
15057 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15058 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15059 N_F_ALL
| N_KEY
| N_VFP
);
15061 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15063 do_vfp_nsyn_opcode ("fdivs");
15065 /* ARMv8.2 fp16 instruction. */
15067 do_scalar_fp16_v82_encode ();
15070 do_vfp_nsyn_opcode ("fdivd");
15074 do_vfp_nsyn_nmul (void)
15076 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15077 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15078 N_F_ALL
| N_KEY
| N_VFP
);
15080 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15082 NEON_ENCODE (SINGLE
, inst
);
15083 do_vfp_sp_dyadic ();
15085 /* ARMv8.2 fp16 instruction. */
15087 do_scalar_fp16_v82_encode ();
15091 NEON_ENCODE (DOUBLE
, inst
);
15092 do_vfp_dp_rd_rn_rm ();
15094 do_vfp_cond_or_thumb ();
15099 do_vfp_nsyn_cmp (void)
15101 enum neon_shape rs
;
15102 if (inst
.operands
[1].isreg
)
15104 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15105 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15107 if (rs
== NS_FF
|| rs
== NS_HH
)
15109 NEON_ENCODE (SINGLE
, inst
);
15110 do_vfp_sp_monadic ();
15114 NEON_ENCODE (DOUBLE
, inst
);
15115 do_vfp_dp_rd_rm ();
15120 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
15121 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
15123 switch (inst
.instruction
& 0x0fffffff)
15126 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
15129 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
15135 if (rs
== NS_FI
|| rs
== NS_HI
)
15137 NEON_ENCODE (SINGLE
, inst
);
15138 do_vfp_sp_compare_z ();
15142 NEON_ENCODE (DOUBLE
, inst
);
15146 do_vfp_cond_or_thumb ();
15148 /* ARMv8.2 fp16 instruction. */
15149 if (rs
== NS_HI
|| rs
== NS_HH
)
15150 do_scalar_fp16_v82_encode ();
15154 nsyn_insert_sp (void)
15156 inst
.operands
[1] = inst
.operands
[0];
15157 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
15158 inst
.operands
[0].reg
= REG_SP
;
15159 inst
.operands
[0].isreg
= 1;
15160 inst
.operands
[0].writeback
= 1;
15161 inst
.operands
[0].present
= 1;
15165 do_vfp_nsyn_push (void)
15169 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15170 _("register list must contain at least 1 and at most 16 "
15173 if (inst
.operands
[1].issingle
)
15174 do_vfp_nsyn_opcode ("fstmdbs");
15176 do_vfp_nsyn_opcode ("fstmdbd");
15180 do_vfp_nsyn_pop (void)
15184 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15185 _("register list must contain at least 1 and at most 16 "
15188 if (inst
.operands
[1].issingle
)
15189 do_vfp_nsyn_opcode ("fldmias");
15191 do_vfp_nsyn_opcode ("fldmiad");
15194 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15195 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15198 neon_dp_fixup (struct arm_it
* insn
)
15200 unsigned int i
= insn
->instruction
;
15205 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15216 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit; for the
     power-of-two sizes used here that is log2(x) + 1, so subtracting 4
     yields log2(x) - 3.  */
  unsigned lsb_index = (unsigned) ffs (x);
  return lsb_index - 4;
}
15228 #define LOW4(R) ((R) & 0xf)
15229 #define HI1(R) (((R) >> 4) & 1)
15232 mve_encode_qqr (int size
, int fp
)
15234 if (inst
.operands
[2].reg
== REG_SP
)
15235 as_tsktsk (MVE_BAD_SP
);
15236 else if (inst
.operands
[2].reg
== REG_PC
)
15237 as_tsktsk (MVE_BAD_PC
);
15242 if (((unsigned)inst
.instruction
) == 0xd00)
15243 inst
.instruction
= 0xee300f40;
15245 else if (((unsigned)inst
.instruction
) == 0x200d00)
15246 inst
.instruction
= 0xee301f40;
15248 /* Setting size which is 1 for F16 and 0 for F32. */
15249 inst
.instruction
|= (size
== 16) << 28;
15254 if (((unsigned)inst
.instruction
) == 0x800)
15255 inst
.instruction
= 0xee010f40;
15257 else if (((unsigned)inst
.instruction
) == 0x1000800)
15258 inst
.instruction
= 0xee011f40;
15259 /* Setting bits for size. */
15260 inst
.instruction
|= neon_logbits (size
) << 20;
15262 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15263 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15264 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15265 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15266 inst
.instruction
|= inst
.operands
[2].reg
;
15271 mve_encode_rqq (unsigned bit28
, unsigned size
)
15273 inst
.instruction
|= bit28
<< 28;
15274 inst
.instruction
|= neon_logbits (size
) << 20;
15275 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15276 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15277 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15278 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15279 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15284 mve_encode_qqq (int ubit
, int size
)
15287 inst
.instruction
|= (ubit
!= 0) << 28;
15288 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15289 inst
.instruction
|= neon_logbits (size
) << 20;
15290 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15291 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15292 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15293 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15294 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15300 /* Encode insns with bit pattern:
15302 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15303 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15305 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15306 different meaning for some instruction. */
15309 neon_three_same (int isquad
, int ubit
, int size
)
15311 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15312 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15313 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15314 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15315 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15316 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15317 inst
.instruction
|= (isquad
!= 0) << 6;
15318 inst
.instruction
|= (ubit
!= 0) << 24;
15320 inst
.instruction
|= neon_logbits (size
) << 20;
15322 neon_dp_fixup (&inst
);
15325 /* Encode instructions of the form:
15327 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15328 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15330 Don't write size if SIZE == -1. */
15333 neon_two_same (int qbit
, int ubit
, int size
)
15335 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15336 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15337 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15338 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15339 inst
.instruction
|= (qbit
!= 0) << 6;
15340 inst
.instruction
|= (ubit
!= 0) << 24;
15343 inst
.instruction
|= neon_logbits (size
) << 18;
15345 neon_dp_fixup (&inst
);
15348 /* Neon instruction encoders, in approximate order of appearance. */
15351 do_neon_dyadic_i_su (void)
15353 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15354 struct neon_type_el et
= neon_check_type (3, rs
,
15355 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15356 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15360 do_neon_dyadic_i64_su (void)
15362 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15363 struct neon_type_el et
= neon_check_type (3, rs
,
15364 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15365 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15369 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15372 unsigned size
= et
.size
>> 3;
15373 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15374 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15375 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15376 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15377 inst
.instruction
|= (isquad
!= 0) << 6;
15378 inst
.instruction
|= immbits
<< 16;
15379 inst
.instruction
|= (size
>> 3) << 7;
15380 inst
.instruction
|= (size
& 0x7) << 19;
15382 inst
.instruction
|= (uval
!= 0) << 24;
15384 neon_dp_fixup (&inst
);
15388 do_neon_shl_imm (void)
15390 if (!inst
.operands
[2].isreg
)
15392 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15393 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15394 int imm
= inst
.operands
[2].imm
;
15396 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15397 _("immediate out of range for shift"));
15398 NEON_ENCODE (IMMED
, inst
);
15399 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15403 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15404 struct neon_type_el et
= neon_check_type (3, rs
,
15405 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15408 /* VSHL/VQSHL 3-register variants have syntax such as:
15410 whereas other 3-register operations encoded by neon_three_same have
15413 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15415 tmp
= inst
.operands
[2].reg
;
15416 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15417 inst
.operands
[1].reg
= tmp
;
15418 NEON_ENCODE (INTEGER
, inst
);
15419 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15424 do_neon_qshl_imm (void)
15426 if (!inst
.operands
[2].isreg
)
15428 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15429 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15430 int imm
= inst
.operands
[2].imm
;
15432 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15433 _("immediate out of range for shift"));
15434 NEON_ENCODE (IMMED
, inst
);
15435 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15439 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15440 struct neon_type_el et
= neon_check_type (3, rs
,
15441 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15444 /* See note in do_neon_shl_imm. */
15445 tmp
= inst
.operands
[2].reg
;
15446 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15447 inst
.operands
[1].reg
= tmp
;
15448 NEON_ENCODE (INTEGER
, inst
);
15449 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15454 do_neon_rshl (void)
15456 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15457 struct neon_type_el et
= neon_check_type (3, rs
,
15458 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15461 tmp
= inst
.operands
[2].reg
;
15462 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15463 inst
.operands
[1].reg
= tmp
;
15464 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15468 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15470 /* Handle .I8 pseudo-instructions. */
15473 /* Unfortunately, this will make everything apart from zero out-of-range.
15474 FIXME is this the intended semantics? There doesn't seem much point in
15475 accepting .I8 if so. */
15476 immediate
|= immediate
<< 8;
15482 if (immediate
== (immediate
& 0x000000ff))
15484 *immbits
= immediate
;
15487 else if (immediate
== (immediate
& 0x0000ff00))
15489 *immbits
= immediate
>> 8;
15492 else if (immediate
== (immediate
& 0x00ff0000))
15494 *immbits
= immediate
>> 16;
15497 else if (immediate
== (immediate
& 0xff000000))
15499 *immbits
= immediate
>> 24;
15502 if ((immediate
& 0xffff) != (immediate
>> 16))
15503 goto bad_immediate
;
15504 immediate
&= 0xffff;
15507 if (immediate
== (immediate
& 0x000000ff))
15509 *immbits
= immediate
;
15512 else if (immediate
== (immediate
& 0x0000ff00))
15514 *immbits
= immediate
>> 8;
15519 first_error (_("immediate value out of range"));
15524 do_neon_logic (void)
15526 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15528 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15529 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15530 /* U bit and size field were set as part of the bitmask. */
15531 NEON_ENCODE (INTEGER
, inst
);
15532 neon_three_same (neon_quad (rs
), 0, -1);
15536 const int three_ops_form
= (inst
.operands
[2].present
15537 && !inst
.operands
[2].isreg
);
15538 const int immoperand
= (three_ops_form
? 2 : 1);
15539 enum neon_shape rs
= (three_ops_form
15540 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15541 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15542 struct neon_type_el et
= neon_check_type (2, rs
,
15543 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15544 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15548 if (et
.type
== NT_invtype
)
15551 if (three_ops_form
)
15552 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15553 _("first and second operands shall be the same register"));
15555 NEON_ENCODE (IMMED
, inst
);
15557 immbits
= inst
.operands
[immoperand
].imm
;
15560 /* .i64 is a pseudo-op, so the immediate must be a repeating
15562 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15563 inst
.operands
[immoperand
].reg
: 0))
15565 /* Set immbits to an invalid constant. */
15566 immbits
= 0xdeadbeef;
15573 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15577 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15581 /* Pseudo-instruction for VBIC. */
15582 neon_invert_size (&immbits
, 0, et
.size
);
15583 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15587 /* Pseudo-instruction for VORR. */
15588 neon_invert_size (&immbits
, 0, et
.size
);
15589 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15599 inst
.instruction
|= neon_quad (rs
) << 6;
15600 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15601 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15602 inst
.instruction
|= cmode
<< 8;
15603 neon_write_immbits (immbits
);
15605 neon_dp_fixup (&inst
);
15610 do_neon_bitfield (void)
15612 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15613 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15614 neon_three_same (neon_quad (rs
), 0, -1);
15618 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15621 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15622 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15624 if (et
.type
== NT_float
)
15626 NEON_ENCODE (FLOAT
, inst
);
15628 mve_encode_qqr (et
.size
, 1);
15630 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15634 NEON_ENCODE (INTEGER
, inst
);
15636 mve_encode_qqr (et
.size
, 0);
15638 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15644 do_neon_dyadic_if_su_d (void)
15646 /* This version only allow D registers, but that constraint is enforced during
15647 operand parsing so we don't need to do anything extra here. */
15648 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15652 do_neon_dyadic_if_i_d (void)
15654 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15655 affected if we specify unsigned args. */
15656 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15659 enum vfp_or_neon_is_neon_bits
15662 NEON_CHECK_ARCH
= 2,
15663 NEON_CHECK_ARCH8
= 4
15666 /* Call this function if an instruction which may have belonged to the VFP or
15667 Neon instruction sets, but turned out to be a Neon instruction (due to the
15668 operand types involved, etc.). We have to check and/or fix-up a couple of
15671 - Make sure the user hasn't attempted to make a Neon instruction
15673 - Alter the value in the condition code field if necessary.
15674 - Make sure that the arch supports Neon instructions.
15676 Which of these operations take place depends on bits from enum
15677 vfp_or_neon_is_neon_bits.
15679 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15680 current instruction's condition is COND_ALWAYS, the condition field is
15681 changed to inst.uncond_value. This is necessary because instructions shared
15682 between VFP and Neon may be conditional for the VFP variants only, and the
15683 unconditional Neon version must have, e.g., 0xF in the condition field. */
15686 vfp_or_neon_is_neon (unsigned check
)
15688 /* Conditions are always legal in Thumb mode (IT blocks). */
15689 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15691 if (inst
.cond
!= COND_ALWAYS
)
15693 first_error (_(BAD_COND
));
15696 if (inst
.uncond_value
!= -1)
15697 inst
.instruction
|= inst
.uncond_value
<< 28;
15701 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
15702 || ((check
& NEON_CHECK_ARCH8
)
15703 && !mark_feature_used (&fpu_neon_ext_armv8
)))
15705 first_error (_(BAD_FPU
));
15713 check_simd_pred_availability (int fp
, unsigned check
)
15715 if (inst
.cond
> COND_ALWAYS
)
15717 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15719 inst
.error
= BAD_FPU
;
15722 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15724 else if (inst
.cond
< COND_ALWAYS
)
15726 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15727 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15728 else if (vfp_or_neon_is_neon (check
) == FAIL
)
15733 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
15734 && vfp_or_neon_is_neon (check
) == FAIL
)
15737 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15738 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15744 do_mve_vst_vld (void)
15746 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15749 constraint (!inst
.operands
[1].preind
|| inst
.relocs
[0].exp
.X_add_symbol
!= 0
15750 || inst
.relocs
[0].exp
.X_add_number
!= 0
15751 || inst
.operands
[1].immisreg
!= 0,
15753 constraint (inst
.vectype
.el
[0].size
> 32, BAD_EL_TYPE
);
15754 if (inst
.operands
[1].reg
== REG_PC
)
15755 as_tsktsk (MVE_BAD_PC
);
15756 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
15757 as_tsktsk (MVE_BAD_SP
);
15760 /* These instructions are one of the "exceptions" mentioned in
15761 handle_pred_state. They are MVE instructions that are not VPT compatible
15762 and do not accept a VPT code, thus appending such a code is a syntax
15764 if (inst
.cond
> COND_ALWAYS
)
15765 first_error (BAD_SYNTAX
);
15766 /* If we append a scalar condition code we can set this to
15767 MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error. */
15768 else if (inst
.cond
< COND_ALWAYS
)
15769 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15771 inst
.pred_insn_type
= MVE_UNPREDICABLE_INSN
;
15773 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15774 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
15775 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15776 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15777 inst
.instruction
|= neon_logbits (inst
.vectype
.el
[0].size
) << 7;
15782 do_neon_dyadic_if_su (void)
15784 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15785 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15788 if (check_simd_pred_availability (et
.type
== NT_float
,
15789 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15792 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15796 do_neon_addsub_if_i (void)
15798 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
15799 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15802 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15803 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
15804 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
15806 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
15807 /* If we are parsing Q registers and the element types match MVE, which NEON
15808 also supports, then we must check whether this is an instruction that can
15809 be used by both MVE/NEON. This distinction can be made based on whether
15810 they are predicated or not. */
15811 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
15813 if (check_simd_pred_availability (et
.type
== NT_float
,
15814 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15819 /* If they are either in a D register or are using an unsupported. */
15821 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15825 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15826 affected if we specify unsigned args. */
15827 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15830 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15832 V<op> A,B (A is operand 0, B is operand 2)
15837 so handle that case specially. */
15840 neon_exchange_operands (void)
15842 if (inst
.operands
[1].present
)
15844 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15846 /* Swap operands[1] and operands[2]. */
15847 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15848 inst
.operands
[1] = inst
.operands
[2];
15849 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15854 inst
.operands
[1] = inst
.operands
[2];
15855 inst
.operands
[2] = inst
.operands
[0];
15860 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15862 if (inst
.operands
[2].isreg
)
15865 neon_exchange_operands ();
15866 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15870 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15871 struct neon_type_el et
= neon_check_type (2, rs
,
15872 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15874 NEON_ENCODE (IMMED
, inst
);
15875 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15876 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15877 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15878 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15879 inst
.instruction
|= neon_quad (rs
) << 6;
15880 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15881 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15883 neon_dp_fixup (&inst
);
15890 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15894 do_neon_cmp_inv (void)
15896 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15902 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3]; for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except
   elsize should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of an
   indexed scalar register, to raw encoding, with register/index range
   checks based on ELSIZE.  Reports an error and returns 0 on failure.  */
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno <= 7 && elno <= 3)
	return regno | (elno << 3);
      break;

    case 32:
      if (regno <= 15 && elno <= 1)
	return regno | (elno << 4);
      break;

    default:
      break;
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15944 /* Encode multiply / multiply-accumulate scalar instructions. */
15947 neon_mul_mac (struct neon_type_el et
, int ubit
)
15951 /* Give a more helpful error message if we have an invalid type. */
15952 if (et
.type
== NT_invtype
)
15955 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15956 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15957 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15958 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15959 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15960 inst
.instruction
|= LOW4 (scalar
);
15961 inst
.instruction
|= HI1 (scalar
) << 5;
15962 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15963 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15964 inst
.instruction
|= (ubit
!= 0) << 24;
15966 neon_dp_fixup (&inst
);
15970 do_neon_mac_maybe_scalar (void)
15972 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15975 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15978 if (inst
.operands
[2].isscalar
)
15980 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15981 struct neon_type_el et
= neon_check_type (3, rs
,
15982 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15983 NEON_ENCODE (SCALAR
, inst
);
15984 neon_mul_mac (et
, neon_quad (rs
));
15988 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15989 affected if we specify unsigned args. */
15990 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15995 do_neon_fmac (void)
15997 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
16000 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16003 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
16009 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16010 struct neon_type_el et
= neon_check_type (3, rs
,
16011 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16012 neon_three_same (neon_quad (rs
), 0, et
.size
);
16015 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16016 same types as the MAC equivalents. The polynomial type for this instruction
16017 is encoded the same as the integer type. */
16022 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
16025 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16028 if (inst
.operands
[2].isscalar
)
16029 do_neon_mac_maybe_scalar ();
16031 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
16035 do_neon_qdmulh (void)
16037 if (inst
.operands
[2].isscalar
)
16039 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16040 struct neon_type_el et
= neon_check_type (3, rs
,
16041 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16042 NEON_ENCODE (SCALAR
, inst
);
16043 neon_mul_mac (et
, neon_quad (rs
));
16047 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16048 struct neon_type_el et
= neon_check_type (3, rs
,
16049 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16050 NEON_ENCODE (INTEGER
, inst
);
16051 /* The U bit (rounding) comes from bit mask. */
16052 neon_three_same (neon_quad (rs
), 0, et
.size
);
16057 do_mve_vmull (void)
16060 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_DDS
,
16061 NS_QQS
, NS_QQQ
, NS_QQR
, NS_NULL
);
16062 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
16063 && inst
.cond
== COND_ALWAYS
16064 && ((unsigned)inst
.instruction
) == M_MNEM_vmullt
)
16069 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16070 N_SUF_32
| N_F64
| N_P8
16071 | N_P16
| N_I_MVE
| N_KEY
);
16072 if (((et
.type
== NT_poly
) && et
.size
== 8
16073 && ARM_CPU_IS_ANY (cpu_variant
))
16074 || (et
.type
== NT_integer
) || (et
.type
== NT_float
))
16081 constraint (rs
!= NS_QQQ
, BAD_FPU
);
16082 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16083 N_SU_32
| N_P8
| N_P16
| N_KEY
);
16085 /* We are dealing with MVE's vmullt. */
16087 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
16088 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
16089 as_tsktsk (BAD_MVE_SRCDEST
);
16091 if (inst
.cond
> COND_ALWAYS
)
16092 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16094 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16096 if (et
.type
== NT_poly
)
16097 mve_encode_qqq (neon_logbits (et
.size
), 64);
16099 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
16104 inst
.instruction
= N_MNEM_vmul
;
16107 inst
.pred_insn_type
= INSIDE_IT_INSN
;
16112 do_mve_vabav (void)
16114 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16119 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16122 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
16123 | N_S16
| N_S32
| N_U8
| N_U16
16126 if (inst
.cond
> COND_ALWAYS
)
16127 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16129 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16131 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
16135 do_mve_vmladav (void)
16137 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16138 struct neon_type_el et
= neon_check_type (3, rs
,
16139 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16141 if (et
.type
== NT_unsigned
16142 && (inst
.instruction
== M_MNEM_vmladavx
16143 || inst
.instruction
== M_MNEM_vmladavax
16144 || inst
.instruction
== M_MNEM_vmlsdav
16145 || inst
.instruction
== M_MNEM_vmlsdava
16146 || inst
.instruction
== M_MNEM_vmlsdavx
16147 || inst
.instruction
== M_MNEM_vmlsdavax
))
16148 first_error (BAD_SIMD_TYPE
);
16150 constraint (inst
.operands
[2].reg
> 14,
16151 _("MVE vector register in the range [Q0..Q7] expected"));
16153 if (inst
.cond
> COND_ALWAYS
)
16154 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16156 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16158 if (inst
.instruction
== M_MNEM_vmlsdav
16159 || inst
.instruction
== M_MNEM_vmlsdava
16160 || inst
.instruction
== M_MNEM_vmlsdavx
16161 || inst
.instruction
== M_MNEM_vmlsdavax
)
16162 inst
.instruction
|= (et
.size
== 8) << 28;
16164 inst
.instruction
|= (et
.size
== 8) << 8;
16166 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
16167 inst
.instruction
|= (et
.size
== 32) << 16;
16171 do_neon_qrdmlah (void)
16173 /* Check we're on the correct architecture. */
16174 if (!mark_feature_used (&fpu_neon_ext_armv8
))
16176 _("instruction form not available on this architecture.");
16177 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
16179 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16180 record_feature_use (&fpu_neon_ext_v8_1
);
16183 if (inst
.operands
[2].isscalar
)
16185 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16186 struct neon_type_el et
= neon_check_type (3, rs
,
16187 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16188 NEON_ENCODE (SCALAR
, inst
);
16189 neon_mul_mac (et
, neon_quad (rs
));
16193 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16194 struct neon_type_el et
= neon_check_type (3, rs
,
16195 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16196 NEON_ENCODE (INTEGER
, inst
);
16197 /* The U bit (rounding) comes from bit mask. */
16198 neon_three_same (neon_quad (rs
), 0, et
.size
);
16203 do_neon_fcmp_absolute (void)
16205 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16206 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16207 N_F_16_32
| N_KEY
);
16208 /* Size field comes from bit mask. */
16209 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT are VACGE/VACGT with the source operands swapped.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16220 do_neon_step (void)
16222 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16223 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16224 N_F_16_32
| N_KEY
);
16225 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
16229 do_neon_abs_neg (void)
16231 enum neon_shape rs
;
16232 struct neon_type_el et
;
16234 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
16237 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16238 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
16240 if (check_simd_pred_availability (et
.type
== NT_float
,
16241 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16244 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16245 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16246 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16247 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16248 inst
.instruction
|= neon_quad (rs
) << 6;
16249 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16250 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16252 neon_dp_fixup (&inst
);
16258 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16259 struct neon_type_el et
= neon_check_type (2, rs
,
16260 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16261 int imm
= inst
.operands
[2].imm
;
16262 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16263 _("immediate out of range for insert"));
16264 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16270 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16271 struct neon_type_el et
= neon_check_type (2, rs
,
16272 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16273 int imm
= inst
.operands
[2].imm
;
16274 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16275 _("immediate out of range for insert"));
16276 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
16280 do_neon_qshlu_imm (void)
16282 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16283 struct neon_type_el et
= neon_check_type (2, rs
,
16284 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
16285 int imm
= inst
.operands
[2].imm
;
16286 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16287 _("immediate out of range for shift"));
16288 /* Only encodes the 'U present' variant of the instruction.
16289 In this case, signed types have OP (bit 8) set to 0.
16290 Unsigned types have OP set to 1. */
16291 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
16292 /* The rest of the bits are the same as other immediate shifts. */
16293 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16297 do_neon_qmovn (void)
16299 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16300 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16301 /* Saturating move where operands can be signed or unsigned, and the
16302 destination has the same signedness. */
16303 NEON_ENCODE (INTEGER
, inst
);
16304 if (et
.type
== NT_unsigned
)
16305 inst
.instruction
|= 0xc0;
16307 inst
.instruction
|= 0x80;
16308 neon_two_same (0, 1, et
.size
/ 2);
16312 do_neon_qmovun (void)
16314 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16315 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16316 /* Saturating move with unsigned results. Operands must be signed. */
16317 NEON_ENCODE (INTEGER
, inst
);
16318 neon_two_same (0, 1, et
.size
/ 2);
16322 do_neon_rshift_sat_narrow (void)
16324 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16325 or unsigned. If operands are unsigned, results must also be unsigned. */
16326 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16327 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16328 int imm
= inst
.operands
[2].imm
;
16329 /* This gets the bounds check, size encoding and immediate bits calculation
16333 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16334 VQMOVN.I<size> <Dd>, <Qm>. */
16337 inst
.operands
[2].present
= 0;
16338 inst
.instruction
= N_MNEM_vqmovn
;
16343 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16344 _("immediate out of range"));
16345 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
16349 do_neon_rshift_sat_narrow_u (void)
16351 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16352 or unsigned. If operands are unsigned, results must also be unsigned. */
16353 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16354 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16355 int imm
= inst
.operands
[2].imm
;
16356 /* This gets the bounds check, size encoding and immediate bits calculation
16360 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16361 VQMOVUN.I<size> <Dd>, <Qm>. */
16364 inst
.operands
[2].present
= 0;
16365 inst
.instruction
= N_MNEM_vqmovun
;
16370 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16371 _("immediate out of range"));
16372 /* FIXME: The manual is kind of unclear about what value U should have in
16373 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16375 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
16379 do_neon_movn (void)
16381 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16382 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16383 NEON_ENCODE (INTEGER
, inst
);
16384 neon_two_same (0, 1, et
.size
/ 2);
16388 do_neon_rshift_narrow (void)
16390 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16391 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16392 int imm
= inst
.operands
[2].imm
;
16393 /* This gets the bounds check, size encoding and immediate bits calculation
16397 /* If immediate is zero then we are a pseudo-instruction for
16398 VMOVN.I<size> <Dd>, <Qm> */
16401 inst
.operands
[2].present
= 0;
16402 inst
.instruction
= N_MNEM_vmovn
;
16407 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16408 _("immediate out of range for narrowing operation"));
16409 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
16413 do_neon_shll (void)
16415 /* FIXME: Type checking when lengthening. */
16416 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
16417 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
16418 unsigned imm
= inst
.operands
[2].imm
;
16420 if (imm
== et
.size
)
16422 /* Maximum shift variant. */
16423 NEON_ENCODE (INTEGER
, inst
);
16424 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16425 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16426 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16427 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16428 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16430 neon_dp_fixup (&inst
);
16434 /* A more-specific type check for non-max versions. */
16435 et
= neon_check_type (2, NS_QDI
,
16436 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16437 NEON_ENCODE (IMMED
, inst
);
16438 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
16442 /* Check the various types for the VCVT instruction, and return which version
16443 the current instruction is. */
16445 #define CVT_FLAVOUR_VAR \
16446 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
16447 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
16448 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
16449 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
16450 /* Half-precision conversions. */ \
16451 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16452 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16453 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
16454 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
16455 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
16456 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
16457 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
16458 Compared with single/double precision variants, only the co-processor \
16459 field is different, so the encoding flow is reused here. */ \
16460 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
16461 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
16462 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
16463 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
16464 /* VFP instructions. */ \
16465 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
16466 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
16467 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
16468 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
16469 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
16470 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
16471 /* VFP instructions with bitshift. */ \
16472 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
16473 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
16474 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
16475 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
16476 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
16477 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
16478 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
16479 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16481 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
16482 neon_cvt_flavour_##C,
16484 /* The different types of conversions we can do. */
16485 enum neon_cvt_flavour
16488 neon_cvt_flavour_invalid
,
16489 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16494 static enum neon_cvt_flavour
16495 get_neon_cvt_flavour (enum neon_shape rs
)
16497 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16498 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16499 if (et.type != NT_invtype) \
16501 inst.error = NULL; \
16502 return (neon_cvt_flavour_##C); \
16505 struct neon_type_el et
;
16506 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16507 || rs
== NS_FF
) ? N_VFP
: 0;
16508 /* The instruction versions which take an immediate take one register
16509 argument, which is extended to the width of the full register. Thus the
16510 "source" and "destination" registers must have the same width. Hack that
16511 here by making the size equal to the key (wider, in this case) operand. */
16512 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16516 return neon_cvt_flavour_invalid
;
16531 /* Neon-syntax VFP conversions. */
16534 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16536 const char *opname
= 0;
16538 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16539 || rs
== NS_FHI
|| rs
== NS_HFI
)
16541 /* Conversions with immediate bitshift. */
16542 const char *enc
[] =
16544 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16550 if (flavour
< (int) ARRAY_SIZE (enc
))
16552 opname
= enc
[flavour
];
16553 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16554 _("operands 0 and 1 must be the same register"));
16555 inst
.operands
[1] = inst
.operands
[2];
16556 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16561 /* Conversions without bitshift. */
16562 const char *enc
[] =
16564 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16570 if (flavour
< (int) ARRAY_SIZE (enc
))
16571 opname
= enc
[flavour
];
16575 do_vfp_nsyn_opcode (opname
);
16577 /* ARMv8.2 fp16 VCVT instruction. */
16578 if (flavour
== neon_cvt_flavour_s32_f16
16579 || flavour
== neon_cvt_flavour_u32_f16
16580 || flavour
== neon_cvt_flavour_f16_u32
16581 || flavour
== neon_cvt_flavour_f16_s32
)
16582 do_scalar_fp16_v82_encode ();
16586 do_vfp_nsyn_cvtz (void)
16588 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16589 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16590 const char *enc
[] =
16592 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16598 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16599 do_vfp_nsyn_opcode (enc
[flavour
]);
16603 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16604 enum neon_cvt_mode mode
)
16609 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16610 D register operands. */
16611 if (flavour
== neon_cvt_flavour_s32_f64
16612 || flavour
== neon_cvt_flavour_u32_f64
)
16613 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16616 if (flavour
== neon_cvt_flavour_s32_f16
16617 || flavour
== neon_cvt_flavour_u32_f16
)
16618 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16621 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16625 case neon_cvt_flavour_s32_f64
:
16629 case neon_cvt_flavour_s32_f32
:
16633 case neon_cvt_flavour_s32_f16
:
16637 case neon_cvt_flavour_u32_f64
:
16641 case neon_cvt_flavour_u32_f32
:
16645 case neon_cvt_flavour_u32_f16
:
16650 first_error (_("invalid instruction shape"));
16656 case neon_cvt_mode_a
: rm
= 0; break;
16657 case neon_cvt_mode_n
: rm
= 1; break;
16658 case neon_cvt_mode_p
: rm
= 2; break;
16659 case neon_cvt_mode_m
: rm
= 3; break;
16660 default: first_error (_("invalid rounding mode")); return;
16663 NEON_ENCODE (FPV8
, inst
);
16664 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16665 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16666 inst
.instruction
|= sz
<< 8;
16668 /* ARMv8.2 fp16 VCVT instruction. */
16669 if (flavour
== neon_cvt_flavour_s32_f16
16670 ||flavour
== neon_cvt_flavour_u32_f16
)
16671 do_scalar_fp16_v82_encode ();
16672 inst
.instruction
|= op
<< 7;
16673 inst
.instruction
|= rm
<< 16;
16674 inst
.instruction
|= 0xf0000000;
16675 inst
.is_neon
= TRUE
;
16679 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16681 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16682 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16683 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16685 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16687 if (flavour
== neon_cvt_flavour_invalid
)
16690 /* PR11109: Handle round-to-zero for VCVT conversions. */
16691 if (mode
== neon_cvt_mode_z
16692 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16693 && (flavour
== neon_cvt_flavour_s16_f16
16694 || flavour
== neon_cvt_flavour_u16_f16
16695 || flavour
== neon_cvt_flavour_s32_f32
16696 || flavour
== neon_cvt_flavour_u32_f32
16697 || flavour
== neon_cvt_flavour_s32_f64
16698 || flavour
== neon_cvt_flavour_u32_f64
)
16699 && (rs
== NS_FD
|| rs
== NS_FF
))
16701 do_vfp_nsyn_cvtz ();
16705 /* ARMv8.2 fp16 VCVT conversions. */
16706 if (mode
== neon_cvt_mode_z
16707 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16708 && (flavour
== neon_cvt_flavour_s32_f16
16709 || flavour
== neon_cvt_flavour_u32_f16
)
16712 do_vfp_nsyn_cvtz ();
16713 do_scalar_fp16_v82_encode ();
16717 /* VFP rather than Neon conversions. */
16718 if (flavour
>= neon_cvt_flavour_first_fp
)
16720 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16721 do_vfp_nsyn_cvt (rs
, flavour
);
16723 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16734 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16735 0x0000100, 0x1000100, 0x0, 0x1000000};
16737 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16740 /* Fixed-point conversion with #0 immediate is encoded as an
16741 integer conversion. */
16742 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16744 NEON_ENCODE (IMMED
, inst
);
16745 if (flavour
!= neon_cvt_flavour_invalid
)
16746 inst
.instruction
|= enctab
[flavour
];
16747 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16748 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16749 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16750 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16751 inst
.instruction
|= neon_quad (rs
) << 6;
16752 inst
.instruction
|= 1 << 21;
16753 if (flavour
< neon_cvt_flavour_s16_f16
)
16755 inst
.instruction
|= 1 << 21;
16756 immbits
= 32 - inst
.operands
[2].imm
;
16757 inst
.instruction
|= immbits
<< 16;
16761 inst
.instruction
|= 3 << 20;
16762 immbits
= 16 - inst
.operands
[2].imm
;
16763 inst
.instruction
|= immbits
<< 16;
16764 inst
.instruction
&= ~(1 << 9);
16767 neon_dp_fixup (&inst
);
16773 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16775 NEON_ENCODE (FLOAT
, inst
);
16776 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16778 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16781 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16782 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16783 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16784 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16785 inst
.instruction
|= neon_quad (rs
) << 6;
16786 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16787 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16788 inst
.instruction
|= mode
<< 8;
16789 if (flavour
== neon_cvt_flavour_u16_f16
16790 || flavour
== neon_cvt_flavour_s16_f16
)
16791 /* Mask off the original size bits and reencode them. */
16792 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16795 inst
.instruction
|= 0xfc000000;
16797 inst
.instruction
|= 0xf0000000;
16803 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16804 0x100, 0x180, 0x0, 0x080};
16806 NEON_ENCODE (INTEGER
, inst
);
16808 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16811 if (flavour
!= neon_cvt_flavour_invalid
)
16812 inst
.instruction
|= enctab
[flavour
];
16814 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16815 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16816 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16817 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16818 inst
.instruction
|= neon_quad (rs
) << 6;
16819 if (flavour
>= neon_cvt_flavour_s16_f16
16820 && flavour
<= neon_cvt_flavour_f16_u16
)
16821 /* Half precision. */
16822 inst
.instruction
|= 1 << 18;
16824 inst
.instruction
|= 2 << 18;
16826 neon_dp_fixup (&inst
);
16831 /* Half-precision conversions for Advanced SIMD -- neon. */
16834 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16838 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16840 as_bad (_("operand size must match register width"));
16845 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16847 as_bad (_("operand size must match register width"));
16852 inst
.instruction
= 0x3b60600;
16854 inst
.instruction
= 0x3b60700;
16856 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16857 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16858 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16859 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16860 neon_dp_fixup (&inst
);
16864 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16865 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16866 do_vfp_nsyn_cvt (rs
, flavour
);
16868 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16873 do_neon_cvtr (void)
16875 do_neon_cvt_1 (neon_cvt_mode_x
);
16881 do_neon_cvt_1 (neon_cvt_mode_z
);
16885 do_neon_cvta (void)
16887 do_neon_cvt_1 (neon_cvt_mode_a
);
16891 do_neon_cvtn (void)
16893 do_neon_cvt_1 (neon_cvt_mode_n
);
16897 do_neon_cvtp (void)
16899 do_neon_cvt_1 (neon_cvt_mode_p
);
16903 do_neon_cvtm (void)
16905 do_neon_cvt_1 (neon_cvt_mode_m
);
16909 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16912 mark_feature_used (&fpu_vfp_ext_armv8
);
16914 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16915 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16916 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16917 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16918 inst
.instruction
|= to
? 0x10000 : 0;
16919 inst
.instruction
|= t
? 0x80 : 0;
16920 inst
.instruction
|= is_double
? 0x100 : 0;
16921 do_vfp_cond_or_thumb ();
16925 do_neon_cvttb_1 (bfd_boolean t
)
16927 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16928 NS_DF
, NS_DH
, NS_NULL
);
16932 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16935 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16937 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16940 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16942 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16944 /* The VCVTB and VCVTT instructions with D-register operands
16945 don't work for SP only targets. */
16946 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16950 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16952 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16954 /* The VCVTB and VCVTT instructions with D-register operands
16955 don't work for SP only targets. */
16956 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16960 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16967 do_neon_cvtb (void)
16969 do_neon_cvttb_1 (FALSE
);
16974 do_neon_cvtt (void)
16976 do_neon_cvttb_1 (TRUE
);
16980 neon_move_immediate (void)
16982 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16983 struct neon_type_el et
= neon_check_type (2, rs
,
16984 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16985 unsigned immlo
, immhi
= 0, immbits
;
16986 int op
, cmode
, float_p
;
16988 constraint (et
.type
== NT_invtype
,
16989 _("operand size must be specified for immediate VMOV"));
16991 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16992 op
= (inst
.instruction
& (1 << 5)) != 0;
16994 immlo
= inst
.operands
[1].imm
;
16995 if (inst
.operands
[1].regisimm
)
16996 immhi
= inst
.operands
[1].reg
;
16998 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16999 _("immediate has bits set outside the operand size"));
17001 float_p
= inst
.operands
[1].immisfloat
;
17003 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
17004 et
.size
, et
.type
)) == FAIL
)
17006 /* Invert relevant bits only. */
17007 neon_invert_size (&immlo
, &immhi
, et
.size
);
17008 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
17009 with one or the other; those cases are caught by
17010 neon_cmode_for_move_imm. */
17012 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
17013 &op
, et
.size
, et
.type
)) == FAIL
)
17015 first_error (_("immediate out of range"));
17020 inst
.instruction
&= ~(1 << 5);
17021 inst
.instruction
|= op
<< 5;
17023 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17024 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17025 inst
.instruction
|= neon_quad (rs
) << 6;
17026 inst
.instruction
|= cmode
<< 8;
17028 neon_write_immbits (immbits
);
17034 if (inst
.operands
[1].isreg
)
17036 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17038 NEON_ENCODE (INTEGER
, inst
);
17039 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17040 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17041 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17042 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17043 inst
.instruction
|= neon_quad (rs
) << 6;
17047 NEON_ENCODE (IMMED
, inst
);
17048 neon_move_immediate ();
17051 neon_dp_fixup (&inst
);
17054 /* Encode instructions of form:
17056 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
17057 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
17060 neon_mixed_length (struct neon_type_el et
, unsigned size
)
17062 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17063 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17064 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17065 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17066 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17067 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17068 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
17069 inst
.instruction
|= neon_logbits (size
) << 20;
17071 neon_dp_fixup (&inst
);
17075 do_neon_dyadic_long (void)
17077 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17080 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
17083 NEON_ENCODE (INTEGER
, inst
);
17084 /* FIXME: Type checking for lengthening op. */
17085 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17086 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17087 neon_mixed_length (et
, et
.size
);
17089 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
17090 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
17092 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
17093 in an IT block with le/lt conditions. */
17095 if (inst
.cond
== 0xf)
17097 else if (inst
.cond
== 0x10)
17100 inst
.pred_insn_type
= INSIDE_IT_INSN
;
17102 if (inst
.instruction
== N_MNEM_vaddl
)
17104 inst
.instruction
= N_MNEM_vadd
;
17105 do_neon_addsub_if_i ();
17107 else if (inst
.instruction
== N_MNEM_vsubl
)
17109 inst
.instruction
= N_MNEM_vsub
;
17110 do_neon_addsub_if_i ();
17112 else if (inst
.instruction
== N_MNEM_vabdl
)
17114 inst
.instruction
= N_MNEM_vabd
;
17115 do_neon_dyadic_if_su ();
17119 first_error (BAD_FPU
);
17123 do_neon_abal (void)
17125 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17126 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17127 neon_mixed_length (et
, et
.size
);
17131 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
17133 if (inst
.operands
[2].isscalar
)
17135 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
17136 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
17137 NEON_ENCODE (SCALAR
, inst
);
17138 neon_mul_mac (et
, et
.type
== NT_unsigned
);
17142 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17143 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
17144 NEON_ENCODE (INTEGER
, inst
);
17145 neon_mixed_length (et
, et
.size
);
17150 do_neon_mac_maybe_scalar_long (void)
17152 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (regno > 7 || elno > 3)
        goto bad_scalar;

      return ((regno & 0x7)
              | ((elno & 0x1) << 3)
              | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      if (regno > 15 || elno > 1)
        goto bad_scalar;

      return (((regno & 0x1) << 5)
              | ((regno >> 1) & 0x7)
              | ((elno & 0x1) << 3));
    }

bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17189 do_neon_fmac_maybe_scalar_long (int subtype
)
17191 enum neon_shape rs
;
17193 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
17194 field (bits[21:20]) has different meaning. For scalar index variant, it's
17195 used to differentiate add and subtract, otherwise it's with fixed value
17199 if (inst
.cond
!= COND_ALWAYS
)
17200 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
17201 "behaviour is UNPREDICTABLE"));
17203 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
17206 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17209 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
17210 be a scalar index register. */
17211 if (inst
.operands
[2].isscalar
)
17213 high8
= 0xfe000000;
17216 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
17220 high8
= 0xfc000000;
17223 inst
.instruction
|= (0x1 << 23);
17224 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
17227 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
17229 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
17230 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
17231 so we simply pass -1 as size. */
17232 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
17233 neon_three_same (quad_p
, 0, size
);
17235 /* Undo neon_dp_fixup. Redo the high eight bits. */
17236 inst
.instruction
&= 0x00ffffff;
17237 inst
.instruction
|= high8
;
17239 #define LOW1(R) ((R) & 0x1)
17240 #define HI4(R) (((R) >> 1) & 0xf)
17241 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
17242 whether the instruction is in Q form and whether Vm is a scalar indexed
17244 if (inst
.operands
[2].isscalar
)
17247 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
17248 inst
.instruction
&= 0xffffffd0;
17249 inst
.instruction
|= rm
;
17253 /* Redo Rn as well. */
17254 inst
.instruction
&= 0xfff0ff7f;
17255 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17256 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17261 /* Redo Rn and Rm. */
17262 inst
.instruction
&= 0xfff0ff50;
17263 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17264 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17265 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
17266 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
/* Encode vfmal (fused multiply-add long, FP16 inputs, FP32 accumulator).
   Thin dispatcher to the shared vfmal/vfmsl encoder; subtype 0 selects
   the add variant.  Fix: the original wrote `return <void call>;', which
   is a constraint violation in C (C11 6.8.6.4p1) -- call and fall off
   the end instead.  */
static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
/* Encode vfmsl (fused multiply-subtract long, FP16 inputs, FP32
   accumulator).  Subtype 1 selects the subtract variant.  Fix: drop the
   non-conforming `return' of a void call expression (C11 6.8.6.4p1).  */
static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
17283 do_neon_dyadic_wide (void)
17285 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
17286 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17287 neon_mixed_length (et
, et
.size
);
17291 do_neon_dyadic_narrow (void)
17293 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17294 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
17295 /* Operand sign is unimportant, and the U bit is part of the opcode,
17296 so force the operand type to integer. */
17297 et
.type
= NT_integer
;
17298 neon_mixed_length (et
, et
.size
/ 2);
17302 do_neon_mul_sat_scalar_long (void)
17304 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
17308 do_neon_vmull (void)
17310 if (inst
.operands
[2].isscalar
)
17311 do_neon_mac_maybe_scalar_long ();
17314 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17315 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
17317 if (et
.type
== NT_poly
)
17318 NEON_ENCODE (POLY
, inst
);
17320 NEON_ENCODE (INTEGER
, inst
);
17322 /* For polynomial encoding the U bit must be zero, and the size must
17323 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
17324 obviously, as 0b10). */
17327 /* Check we're on the correct architecture. */
17328 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
17330 _("Instruction form not available on this architecture.");
17335 neon_mixed_length (et
, et
.size
);
17342 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17343 struct neon_type_el et
= neon_check_type (3, rs
,
17344 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
17345 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
17347 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
17348 _("shift out of range"));
17349 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17350 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17351 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17352 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17353 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17354 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17355 inst
.instruction
|= neon_quad (rs
) << 6;
17356 inst
.instruction
|= imm
<< 8;
17358 neon_dp_fixup (&inst
);
17364 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17365 struct neon_type_el et
= neon_check_type (2, rs
,
17366 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17367 unsigned op
= (inst
.instruction
>> 7) & 3;
17368 /* N (width of reversed regions) is encoded as part of the bitmask. We
17369 extract it here to check the elements to be reversed are smaller.
17370 Otherwise we'd get a reserved instruction. */
17371 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
17372 gas_assert (elsize
!= 0);
17373 constraint (et
.size
>= elsize
,
17374 _("elements must be smaller than reversal region"));
17375 neon_two_same (neon_quad (rs
), 1, et
.size
);
17381 if (inst
.operands
[1].isscalar
)
17383 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
17384 struct neon_type_el et
= neon_check_type (2, rs
,
17385 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17386 unsigned sizebits
= et
.size
>> 3;
17387 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17388 int logsize
= neon_logbits (et
.size
);
17389 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
17391 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
17394 NEON_ENCODE (SCALAR
, inst
);
17395 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17396 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17397 inst
.instruction
|= LOW4 (dm
);
17398 inst
.instruction
|= HI1 (dm
) << 5;
17399 inst
.instruction
|= neon_quad (rs
) << 6;
17400 inst
.instruction
|= x
<< 17;
17401 inst
.instruction
|= sizebits
<< 16;
17403 neon_dp_fixup (&inst
);
17407 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
17408 struct neon_type_el et
= neon_check_type (2, rs
,
17409 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17410 /* Duplicate ARM register to lanes of vector. */
17411 NEON_ENCODE (ARMREG
, inst
);
17414 case 8: inst
.instruction
|= 0x400000; break;
17415 case 16: inst
.instruction
|= 0x000020; break;
17416 case 32: inst
.instruction
|= 0x000000; break;
17419 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17420 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
17421 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
17422 inst
.instruction
|= neon_quad (rs
) << 21;
17423 /* The encoding for this instruction is identical for the ARM and Thumb
17424 variants, except for the condition field. */
17425 do_vfp_cond_or_thumb ();
17429 /* VMOV has particularly many variations. It can be one of:
17430 0. VMOV<c><q> <Qd>, <Qm>
17431 1. VMOV<c><q> <Dd>, <Dm>
17432 (Register operations, which are VORR with Rm = Rn.)
17433 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17434 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17436 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17437 (ARM register to scalar.)
17438 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17439 (Two ARM registers to vector.)
17440 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17441 (Scalar to ARM register.)
17442 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17443 (Vector to two ARM registers.)
17444 8. VMOV.F32 <Sd>, <Sm>
17445 9. VMOV.F64 <Dd>, <Dm>
17446 (VFP register moves.)
17447 10. VMOV.F32 <Sd>, #imm
17448 11. VMOV.F64 <Dd>, #imm
17449 (VFP float immediate load.)
17450 12. VMOV <Rd>, <Sm>
17451 (VFP single to ARM reg.)
17452 13. VMOV <Sd>, <Rm>
17453 (ARM reg to VFP single.)
17454 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17455 (Two ARM regs to two VFP singles.)
17456 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17457 (Two VFP singles to two ARM regs.)
17459 These cases can be disambiguated using neon_select_shape, except cases 1/9
17460 and 3/11 which depend on the operand type too.
17462 All the encoded bits are hardcoded by this function.
17464 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17465 Cases 5, 7 may be used with VFPv2 and above.
17467 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17468 can specify a type where it doesn't make sense to, and is ignored). */
17473 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
17474 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
17475 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
17476 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
17477 struct neon_type_el et
;
17478 const char *ldconst
= 0;
17482 case NS_DD
: /* case 1/9. */
17483 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17484 /* It is not an error here if no type is given. */
17486 if (et
.type
== NT_float
&& et
.size
== 64)
17488 do_vfp_nsyn_opcode ("fcpyd");
17491 /* fall through. */
17493 case NS_QQ
: /* case 0/1. */
17495 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17497 /* The architecture manual I have doesn't explicitly state which
17498 value the U bit should have for register->register moves, but
17499 the equivalent VORR instruction has U = 0, so do that. */
17500 inst
.instruction
= 0x0200110;
17501 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17502 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17503 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17504 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17505 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17506 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17507 inst
.instruction
|= neon_quad (rs
) << 6;
17509 neon_dp_fixup (&inst
);
17513 case NS_DI
: /* case 3/11. */
17514 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17516 if (et
.type
== NT_float
&& et
.size
== 64)
17518 /* case 11 (fconstd). */
17519 ldconst
= "fconstd";
17520 goto encode_fconstd
;
17522 /* fall through. */
17524 case NS_QI
: /* case 2/3. */
17525 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17527 inst
.instruction
= 0x0800010;
17528 neon_move_immediate ();
17529 neon_dp_fixup (&inst
);
17532 case NS_SR
: /* case 4. */
17534 unsigned bcdebits
= 0;
17536 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17537 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17539 /* .<size> is optional here, defaulting to .32. */
17540 if (inst
.vectype
.elems
== 0
17541 && inst
.operands
[0].vectype
.type
== NT_invtype
17542 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17544 inst
.vectype
.el
[0].type
= NT_untyped
;
17545 inst
.vectype
.el
[0].size
= 32;
17546 inst
.vectype
.elems
= 1;
17549 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17550 logsize
= neon_logbits (et
.size
);
17552 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17554 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17555 && et
.size
!= 32, _(BAD_FPU
));
17556 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17557 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17561 case 8: bcdebits
= 0x8; break;
17562 case 16: bcdebits
= 0x1; break;
17563 case 32: bcdebits
= 0x0; break;
17567 bcdebits
|= x
<< logsize
;
17569 inst
.instruction
= 0xe000b10;
17570 do_vfp_cond_or_thumb ();
17571 inst
.instruction
|= LOW4 (dn
) << 16;
17572 inst
.instruction
|= HI1 (dn
) << 7;
17573 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17574 inst
.instruction
|= (bcdebits
& 3) << 5;
17575 inst
.instruction
|= (bcdebits
>> 2) << 21;
17579 case NS_DRR
: /* case 5 (fmdrr). */
17580 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17583 inst
.instruction
= 0xc400b10;
17584 do_vfp_cond_or_thumb ();
17585 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17586 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17587 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17588 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17591 case NS_RS
: /* case 6. */
17594 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17595 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17596 unsigned abcdebits
= 0;
17598 /* .<dt> is optional here, defaulting to .32. */
17599 if (inst
.vectype
.elems
== 0
17600 && inst
.operands
[0].vectype
.type
== NT_invtype
17601 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17603 inst
.vectype
.el
[0].type
= NT_untyped
;
17604 inst
.vectype
.el
[0].size
= 32;
17605 inst
.vectype
.elems
= 1;
17608 et
= neon_check_type (2, NS_NULL
,
17609 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17610 logsize
= neon_logbits (et
.size
);
17612 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17614 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17615 && et
.size
!= 32, _(BAD_FPU
));
17616 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17617 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17621 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17622 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17623 case 32: abcdebits
= 0x00; break;
17627 abcdebits
|= x
<< logsize
;
17628 inst
.instruction
= 0xe100b10;
17629 do_vfp_cond_or_thumb ();
17630 inst
.instruction
|= LOW4 (dn
) << 16;
17631 inst
.instruction
|= HI1 (dn
) << 7;
17632 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17633 inst
.instruction
|= (abcdebits
& 3) << 5;
17634 inst
.instruction
|= (abcdebits
>> 2) << 21;
17638 case NS_RRD
: /* case 7 (fmrrd). */
17639 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17642 inst
.instruction
= 0xc500b10;
17643 do_vfp_cond_or_thumb ();
17644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17645 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17646 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17647 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17650 case NS_FF
: /* case 8 (fcpys). */
17651 do_vfp_nsyn_opcode ("fcpys");
17655 case NS_FI
: /* case 10 (fconsts). */
17656 ldconst
= "fconsts";
17658 if (!inst
.operands
[1].immisfloat
)
17661 /* Immediate has to fit in 8 bits so float is enough. */
17662 float imm
= (float) inst
.operands
[1].imm
;
17663 memcpy (&new_imm
, &imm
, sizeof (float));
17664 /* But the assembly may have been written to provide an integer
17665 bit pattern that equates to a float, so check that the
17666 conversion has worked. */
17667 if (is_quarter_float (new_imm
))
17669 if (is_quarter_float (inst
.operands
[1].imm
))
17670 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17672 inst
.operands
[1].imm
= new_imm
;
17673 inst
.operands
[1].immisfloat
= 1;
17677 if (is_quarter_float (inst
.operands
[1].imm
))
17679 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17680 do_vfp_nsyn_opcode (ldconst
);
17682 /* ARMv8.2 fp16 vmov.f16 instruction. */
17684 do_scalar_fp16_v82_encode ();
17687 first_error (_("immediate out of range"));
17691 case NS_RF
: /* case 12 (fmrs). */
17692 do_vfp_nsyn_opcode ("fmrs");
17693 /* ARMv8.2 fp16 vmov.f16 instruction. */
17695 do_scalar_fp16_v82_encode ();
17699 case NS_FR
: /* case 13 (fmsr). */
17700 do_vfp_nsyn_opcode ("fmsr");
17701 /* ARMv8.2 fp16 vmov.f16 instruction. */
17703 do_scalar_fp16_v82_encode ();
17706 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17707 (one of which is a list), but we have parsed four. Do some fiddling to
17708 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17710 case NS_RRFF
: /* case 14 (fmrrs). */
17711 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17712 _("VFP registers must be adjacent"));
17713 inst
.operands
[2].imm
= 2;
17714 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17715 do_vfp_nsyn_opcode ("fmrrs");
17718 case NS_FFRR
: /* case 15 (fmsrr). */
17719 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17720 _("VFP registers must be adjacent"));
17721 inst
.operands
[1] = inst
.operands
[2];
17722 inst
.operands
[2] = inst
.operands
[3];
17723 inst
.operands
[0].imm
= 2;
17724 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17725 do_vfp_nsyn_opcode ("fmsrr");
17729 /* neon_select_shape has determined that the instruction
17730 shape is wrong and has already set the error message. */
17739 do_neon_rshift_round_imm (void)
17741 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17742 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17743 int imm
= inst
.operands
[2].imm
;
17745 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17748 inst
.operands
[2].present
= 0;
17753 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17754 _("immediate out of range for shift"));
17755 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17760 do_neon_movhf (void)
17762 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17763 constraint (rs
!= NS_HH
, _("invalid suffix"));
17765 if (inst
.cond
!= COND_ALWAYS
)
17769 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17770 " the behaviour is UNPREDICTABLE"));
17774 inst
.error
= BAD_COND
;
17779 do_vfp_sp_monadic ();
17782 inst
.instruction
|= 0xf0000000;
17786 do_neon_movl (void)
17788 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17789 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17790 unsigned sizebits
= et
.size
>> 3;
17791 inst
.instruction
|= sizebits
<< 19;
17792 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17798 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17799 struct neon_type_el et
= neon_check_type (2, rs
,
17800 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17801 NEON_ENCODE (INTEGER
, inst
);
17802 neon_two_same (neon_quad (rs
), 1, et
.size
);
17806 do_neon_zip_uzp (void)
17808 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17809 struct neon_type_el et
= neon_check_type (2, rs
,
17810 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17811 if (rs
== NS_DD
&& et
.size
== 32)
17813 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17814 inst
.instruction
= N_MNEM_vtrn
;
17818 neon_two_same (neon_quad (rs
), 1, et
.size
);
17822 do_neon_sat_abs_neg (void)
17824 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17825 struct neon_type_el et
= neon_check_type (2, rs
,
17826 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17827 neon_two_same (neon_quad (rs
), 1, et
.size
);
17831 do_neon_pair_long (void)
17833 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17834 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17835 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17836 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17837 neon_two_same (neon_quad (rs
), 1, et
.size
);
17841 do_neon_recip_est (void)
17843 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17844 struct neon_type_el et
= neon_check_type (2, rs
,
17845 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17846 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17847 neon_two_same (neon_quad (rs
), 1, et
.size
);
17853 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17854 struct neon_type_el et
= neon_check_type (2, rs
,
17855 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17856 neon_two_same (neon_quad (rs
), 1, et
.size
);
17862 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17863 struct neon_type_el et
= neon_check_type (2, rs
,
17864 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17865 neon_two_same (neon_quad (rs
), 1, et
.size
);
17871 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17872 struct neon_type_el et
= neon_check_type (2, rs
,
17873 N_EQK
| N_INT
, N_8
| N_KEY
);
17874 neon_two_same (neon_quad (rs
), 1, et
.size
);
17880 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17881 neon_two_same (neon_quad (rs
), 1, -1);
17885 do_neon_tbl_tbx (void)
17887 unsigned listlenbits
;
17888 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17890 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17892 first_error (_("bad list length for table lookup"));
17896 listlenbits
= inst
.operands
[1].imm
- 1;
17897 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17898 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17899 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17900 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17901 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17902 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17903 inst
.instruction
|= listlenbits
<< 8;
17905 neon_dp_fixup (&inst
);
17909 do_neon_ldm_stm (void)
17911 /* P, U and L bits are part of bitmask. */
17912 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17913 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17915 if (inst
.operands
[1].issingle
)
17917 do_vfp_nsyn_ldm_stm (is_dbmode
);
17921 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17922 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17924 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17925 _("register list must contain at least 1 and at most 16 "
17928 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17929 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17930 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17931 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17933 inst
.instruction
|= offsetbits
;
17935 do_vfp_cond_or_thumb ();
17939 do_neon_ldr_str (void)
17941 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17943 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17944 And is UNPREDICTABLE in thumb mode. */
17946 && inst
.operands
[1].reg
== REG_PC
17947 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17950 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17951 else if (warn_on_deprecated
)
17952 as_tsktsk (_("Use of PC here is deprecated"));
17955 if (inst
.operands
[0].issingle
)
17958 do_vfp_nsyn_opcode ("flds");
17960 do_vfp_nsyn_opcode ("fsts");
17962 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17963 if (inst
.vectype
.el
[0].size
== 16)
17964 do_scalar_fp16_v82_encode ();
17969 do_vfp_nsyn_opcode ("fldd");
17971 do_vfp_nsyn_opcode ("fstd");
17976 do_t_vldr_vstr_sysreg (void)
17978 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
17979 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
17981 /* Use of PC is UNPREDICTABLE. */
17982 if (inst
.operands
[1].reg
== REG_PC
)
17983 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17985 if (inst
.operands
[1].immisreg
)
17986 inst
.error
= _("instruction does not accept register index");
17988 if (!inst
.operands
[1].isreg
)
17989 inst
.error
= _("instruction does not accept PC-relative addressing");
17991 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
17992 inst
.error
= _("immediate value out of range");
17994 inst
.instruction
= 0xec000f80;
17996 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
17997 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
17998 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
17999 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
18003 do_vldr_vstr (void)
18005 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
18007 /* VLDR/VSTR (System Register). */
18010 if (!mark_feature_used (&arm_ext_v8_1m_main
))
18011 as_bad (_("Instruction not permitted on this architecture"));
18013 do_t_vldr_vstr_sysreg ();
18018 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
18019 as_bad (_("Instruction not permitted on this architecture"));
18020 do_neon_ldr_str ();
18024 /* "interleave" version also handles non-interleaving register VLD1/VST1
18028 do_neon_ld_st_interleave (void)
18030 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
18031 N_8
| N_16
| N_32
| N_64
);
18032 unsigned alignbits
= 0;
18034 /* The bits in this table go:
18035 0: register stride of one (0) or two (1)
18036 1,2: register list length, minus one (1, 2, 3, 4).
18037 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
18038 We use -1 for invalid entries. */
18039 const int typetable
[] =
18041 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
18042 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
18043 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
18044 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
18048 if (et
.type
== NT_invtype
)
18051 if (inst
.operands
[1].immisalign
)
18052 switch (inst
.operands
[1].imm
>> 8)
18054 case 64: alignbits
= 1; break;
18056 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
18057 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18058 goto bad_alignment
;
18062 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18063 goto bad_alignment
;
18068 first_error (_("bad alignment"));
18072 inst
.instruction
|= alignbits
<< 4;
18073 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18075 /* Bits [4:6] of the immediate in a list specifier encode register stride
18076 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
18077 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
18078 up the right value for "type" in a table based on this value and the given
18079 list style, then stick it back. */
18080 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
18081 | (((inst
.instruction
>> 8) & 3) << 3);
18083 typebits
= typetable
[idx
];
18085 constraint (typebits
== -1, _("bad list type for instruction"));
18086 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
18089 inst
.instruction
&= ~0xf00;
18090 inst
.instruction
|= typebits
<< 8;
18093 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18094 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18095 otherwise. The variable arguments are a list of pairs of legal (size, align)
18096 values, terminated with -1. */
18099 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
18102 int result
= FAIL
, thissize
, thisalign
;
18104 if (!inst
.operands
[1].immisalign
)
18110 va_start (ap
, do_alignment
);
18114 thissize
= va_arg (ap
, int);
18115 if (thissize
== -1)
18117 thisalign
= va_arg (ap
, int);
18119 if (size
== thissize
&& align
== thisalign
)
18122 while (result
!= SUCCESS
);
18126 if (result
== SUCCESS
)
18129 first_error (_("unsupported alignment for instruction"));
18135 do_neon_ld_st_lane (void)
18137 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18138 int align_good
, do_alignment
= 0;
18139 int logsize
= neon_logbits (et
.size
);
18140 int align
= inst
.operands
[1].imm
>> 8;
18141 int n
= (inst
.instruction
>> 8) & 3;
18142 int max_el
= 64 / et
.size
;
18144 if (et
.type
== NT_invtype
)
18147 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
18148 _("bad list length"));
18149 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
18150 _("scalar index out of range"));
18151 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
18153 _("stride of 2 unavailable when element size is 8"));
18157 case 0: /* VLD1 / VST1. */
18158 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
18160 if (align_good
== FAIL
)
18164 unsigned alignbits
= 0;
18167 case 16: alignbits
= 0x1; break;
18168 case 32: alignbits
= 0x3; break;
18171 inst
.instruction
|= alignbits
<< 4;
18175 case 1: /* VLD2 / VST2. */
18176 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
18177 16, 32, 32, 64, -1);
18178 if (align_good
== FAIL
)
18181 inst
.instruction
|= 1 << 4;
18184 case 2: /* VLD3 / VST3. */
18185 constraint (inst
.operands
[1].immisalign
,
18186 _("can't use alignment with this instruction"));
18189 case 3: /* VLD4 / VST4. */
18190 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18191 16, 64, 32, 64, 32, 128, -1);
18192 if (align_good
== FAIL
)
18196 unsigned alignbits
= 0;
18199 case 8: alignbits
= 0x1; break;
18200 case 16: alignbits
= 0x1; break;
18201 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
18204 inst
.instruction
|= alignbits
<< 4;
18211 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
18212 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18213 inst
.instruction
|= 1 << (4 + logsize
);
18215 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
18216 inst
.instruction
|= logsize
<< 10;
18219 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18222 do_neon_ld_dup (void)
18224 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18225 int align_good
, do_alignment
= 0;
18227 if (et
.type
== NT_invtype
)
18230 switch ((inst
.instruction
>> 8) & 3)
18232 case 0: /* VLD1. */
18233 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
18234 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18235 &do_alignment
, 16, 16, 32, 32, -1);
18236 if (align_good
== FAIL
)
18238 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
18241 case 2: inst
.instruction
|= 1 << 5; break;
18242 default: first_error (_("bad list length")); return;
18244 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18247 case 1: /* VLD2. */
18248 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18249 &do_alignment
, 8, 16, 16, 32, 32, 64,
18251 if (align_good
== FAIL
)
18253 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
18254 _("bad list length"));
18255 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18256 inst
.instruction
|= 1 << 5;
18257 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18260 case 2: /* VLD3. */
18261 constraint (inst
.operands
[1].immisalign
,
18262 _("can't use alignment with this instruction"));
18263 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
18264 _("bad list length"));
18265 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18266 inst
.instruction
|= 1 << 5;
18267 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18270 case 3: /* VLD4. */
18272 int align
= inst
.operands
[1].imm
>> 8;
18273 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18274 16, 64, 32, 64, 32, 128, -1);
18275 if (align_good
== FAIL
)
18277 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
18278 _("bad list length"));
18279 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18280 inst
.instruction
|= 1 << 5;
18281 if (et
.size
== 32 && align
== 128)
18282 inst
.instruction
|= 0x3 << 6;
18284 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18291 inst
.instruction
|= do_alignment
<< 4;
18294 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18295 apart from bits [11:4]. */
18298 do_neon_ldx_stx (void)
18300 if (inst
.operands
[1].isreg
)
18301 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18303 switch (NEON_LANE (inst
.operands
[0].imm
))
18305 case NEON_INTERLEAVE_LANES
:
18306 NEON_ENCODE (INTERLV
, inst
);
18307 do_neon_ld_st_interleave ();
18310 case NEON_ALL_LANES
:
18311 NEON_ENCODE (DUP
, inst
);
18312 if (inst
.instruction
== N_INV
)
18314 first_error ("only loads support such operands");
18321 NEON_ENCODE (LANE
, inst
);
18322 do_neon_ld_st_lane ();
18325 /* L bit comes from bit mask. */
18326 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18327 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18328 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18330 if (inst
.operands
[1].postind
)
18332 int postreg
= inst
.operands
[1].imm
& 0xf;
18333 constraint (!inst
.operands
[1].immisreg
,
18334 _("post-index must be a register"));
18335 constraint (postreg
== 0xd || postreg
== 0xf,
18336 _("bad register for post-index"));
18337 inst
.instruction
|= postreg
;
18341 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
18342 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
18343 || inst
.relocs
[0].exp
.X_add_number
!= 0,
18346 if (inst
.operands
[1].writeback
)
18348 inst
.instruction
|= 0xd;
18351 inst
.instruction
|= 0xf;
18355 inst
.instruction
|= 0xf9000000;
18357 inst
.instruction
|= 0xf4000000;
18362 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
18364 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18365 D register operands. */
18366 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18367 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18370 NEON_ENCODE (FPV8
, inst
);
18372 if (rs
== NS_FFF
|| rs
== NS_HHH
)
18374 do_vfp_sp_dyadic ();
18376 /* ARMv8.2 fp16 instruction. */
18378 do_scalar_fp16_v82_encode ();
18381 do_vfp_dp_rd_rn_rm ();
18384 inst
.instruction
|= 0x100;
18386 inst
.instruction
|= 0xf0000000;
18392 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18394 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
18395 first_error (_("invalid instruction shape"));
18401 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18403 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
18406 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18409 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
18413 do_vrint_1 (enum neon_cvt_mode mode
)
18415 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
18416 struct neon_type_el et
;
18421 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18422 D register operands. */
18423 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18424 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18427 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
18429 if (et
.type
!= NT_invtype
)
18431 /* VFP encodings. */
18432 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
18433 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
18434 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18436 NEON_ENCODE (FPV8
, inst
);
18437 if (rs
== NS_FF
|| rs
== NS_HH
)
18438 do_vfp_sp_monadic ();
18440 do_vfp_dp_rd_rm ();
18444 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
18445 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
18446 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
18447 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
18448 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
18449 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
18450 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
18454 inst
.instruction
|= (rs
== NS_DD
) << 8;
18455 do_vfp_cond_or_thumb ();
18457 /* ARMv8.2 fp16 vrint instruction. */
18459 do_scalar_fp16_v82_encode ();
18463 /* Neon encodings (or something broken...). */
18465 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
18467 if (et
.type
== NT_invtype
)
18470 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18471 NEON_ENCODE (FLOAT
, inst
);
18473 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18476 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18477 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18478 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18479 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18480 inst
.instruction
|= neon_quad (rs
) << 6;
18481 /* Mask off the original size bits and reencode them. */
18482 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
18483 | neon_logbits (et
.size
) << 18);
18487 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
18488 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
18489 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
18490 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
18491 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
18492 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
18493 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
18498 inst
.instruction
|= 0xfc000000;
18500 inst
.instruction
|= 0xf0000000;
18507 do_vrint_1 (neon_cvt_mode_x
);
18513 do_vrint_1 (neon_cvt_mode_z
);
18519 do_vrint_1 (neon_cvt_mode_r
);
18525 do_vrint_1 (neon_cvt_mode_a
);
18531 do_vrint_1 (neon_cvt_mode_n
);
18537 do_vrint_1 (neon_cvt_mode_p
);
18543 do_vrint_1 (neon_cvt_mode_m
);
/* Map a GAS-internal scalar operand OPND to the register/index encoding
   used by VCMLA, for element size ELSIZE (16 or 32 bits).  Reports an
   error and returns 0 when the scalar is out of encodable range.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18564 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18566 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18567 _("expression too complex"));
18568 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18569 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18570 _("immediate out of range"));
18572 if (inst
.operands
[2].isscalar
)
18574 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18575 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18576 N_KEY
| N_F16
| N_F32
).size
;
18577 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18579 inst
.instruction
= 0xfe000800;
18580 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18581 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18582 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18583 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18584 inst
.instruction
|= LOW4 (m
);
18585 inst
.instruction
|= HI1 (m
) << 5;
18586 inst
.instruction
|= neon_quad (rs
) << 6;
18587 inst
.instruction
|= rot
<< 20;
18588 inst
.instruction
|= (size
== 32) << 23;
18592 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18593 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18594 N_KEY
| N_F16
| N_F32
).size
;
18595 neon_three_same (neon_quad (rs
), 0, -1);
18596 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18597 inst
.instruction
|= 0xfc200800;
18598 inst
.instruction
|= rot
<< 23;
18599 inst
.instruction
|= (size
== 32) << 20;
18606 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18608 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18609 _("expression too complex"));
18610 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18611 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18612 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18613 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18614 N_KEY
| N_F16
| N_F32
).size
;
18615 neon_three_same (neon_quad (rs
), 0, -1);
18616 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18617 inst
.instruction
|= 0xfc800800;
18618 inst
.instruction
|= (rot
== 270) << 24;
18619 inst
.instruction
|= (size
== 32) << 20;
18622 /* Dot Product instructions encoding support. */
18625 do_neon_dotproduct (int unsigned_p
)
18627 enum neon_shape rs
;
18628 unsigned scalar_oprd2
= 0;
18631 if (inst
.cond
!= COND_ALWAYS
)
18632 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18633 "is UNPREDICTABLE"));
18635 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18638 /* Dot Product instructions are in three-same D/Q register format or the third
18639 operand can be a scalar index register. */
18640 if (inst
.operands
[2].isscalar
)
18642 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18643 high8
= 0xfe000000;
18644 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18648 high8
= 0xfc000000;
18649 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18653 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18655 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18657 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18658 Product instruction, so we pass 0 as the "ubit" parameter. And the
18659 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18660 neon_three_same (neon_quad (rs
), 0, 32);
18662 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18663 different NEON three-same encoding. */
18664 inst
.instruction
&= 0x00ffffff;
18665 inst
.instruction
|= high8
;
18666 /* Encode 'U' bit which indicates signedness. */
18667 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18668 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18669 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18670 the instruction encoding. */
18671 if (inst
.operands
[2].isscalar
)
18673 inst
.instruction
&= 0xffffffd0;
18674 inst
.instruction
|= LOW4 (scalar_oprd2
);
18675 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
18695 /* Crypto v1 instructions. */
18697 do_crypto_2op_1 (unsigned elttype
, int op
)
18699 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18701 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
18707 NEON_ENCODE (INTEGER
, inst
);
18708 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18709 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18710 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18711 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18713 inst
.instruction
|= op
<< 6;
18716 inst
.instruction
|= 0xfc000000;
18718 inst
.instruction
|= 0xf0000000;
18722 do_crypto_3op_1 (int u
, int op
)
18724 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18726 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18727 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18732 NEON_ENCODE (INTEGER
, inst
);
18733 neon_three_same (1, u
, 8 << op
);
18739 do_crypto_2op_1 (N_8
, 0);
18745 do_crypto_2op_1 (N_8
, 1);
18751 do_crypto_2op_1 (N_8
, 2);
18757 do_crypto_2op_1 (N_8
, 3);
18763 do_crypto_3op_1 (0, 0);
18769 do_crypto_3op_1 (0, 1);
18775 do_crypto_3op_1 (0, 2);
18781 do_crypto_3op_1 (0, 3);
18787 do_crypto_3op_1 (1, 0);
18793 do_crypto_3op_1 (1, 1);
18797 do_sha256su1 (void)
18799 do_crypto_3op_1 (1, 2);
18805 do_crypto_2op_1 (N_32
, -1);
18811 do_crypto_2op_1 (N_32
, 0);
18815 do_sha256su0 (void)
18817 do_crypto_2op_1 (N_32
, 1);
18821 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18823 unsigned int Rd
= inst
.operands
[0].reg
;
18824 unsigned int Rn
= inst
.operands
[1].reg
;
18825 unsigned int Rm
= inst
.operands
[2].reg
;
18827 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18828 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18829 inst
.instruction
|= LOW4 (Rn
) << 16;
18830 inst
.instruction
|= LOW4 (Rm
);
18831 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18832 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18834 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18835 as_warn (UNPRED_REG ("r15"));
18877 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18879 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18880 do_vfp_sp_dp_cvt ();
18881 do_vfp_cond_or_thumb ();
18885 /* Overall per-instruction processing. */
18887 /* We need to be able to fix up arbitrary expressions in some statements.
18888 This is so that we can handle symbols that are an arbitrary distance from
18889 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18890 which returns part of an address in a form which will be valid for
18891 a data instruction. We do this by pushing the expression into a symbol
18892 in the expr_section, and creating a fix for that. */
18895 fix_new_arm (fragS
* frag
,
18909 /* Create an absolute valued symbol, so we have something to
18910 refer to in the object file. Unfortunately for us, gas's
18911 generic expression parsing will already have folded out
18912 any use of .set foo/.type foo %function that may have
18913 been used to set type information of the target location,
18914 that's being specified symbolically. We have to presume
18915 the user knows what they are doing. */
18919 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18921 symbol
= symbol_find_or_make (name
);
18922 S_SET_SEGMENT (symbol
, absolute_section
);
18923 symbol_set_frag (symbol
, &zero_address_frag
);
18924 S_SET_VALUE (symbol
, exp
->X_add_number
);
18925 exp
->X_op
= O_symbol
;
18926 exp
->X_add_symbol
= symbol
;
18927 exp
->X_add_number
= 0;
18933 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18934 (enum bfd_reloc_code_real
) reloc
);
18938 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18939 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18943 /* Mark whether the fix is to a THUMB instruction, or an ARM
18945 new_fix
->tc_fix_data
= thumb_mode
;
18948 /* Create a frg for an instruction requiring relaxation. */
18950 output_relax_insn (void)
18956 /* The size of the instruction is unknown, so tie the debug info to the
18957 start of the instruction. */
18958 dwarf2_emit_insn (0);
18960 switch (inst
.relocs
[0].exp
.X_op
)
18963 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18964 offset
= inst
.relocs
[0].exp
.X_add_number
;
18968 offset
= inst
.relocs
[0].exp
.X_add_number
;
18971 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18975 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18976 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18977 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18980 /* Write a 32-bit thumb instruction to buf. */
18982 put_thumb32_insn (char * buf
, unsigned long insn
)
18984 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18985 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
18989 output_inst (const char * str
)
18995 as_bad ("%s -- `%s'", inst
.error
, str
);
19000 output_relax_insn ();
19003 if (inst
.size
== 0)
19006 to
= frag_more (inst
.size
);
19007 /* PR 9814: Record the thumb mode into the current frag so that we know
19008 what type of NOP padding to use, if necessary. We override any previous
19009 setting so that if the mode has changed then the NOPS that we use will
19010 match the encoding of the last instruction in the frag. */
19011 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19013 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
19015 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
19016 put_thumb32_insn (to
, inst
.instruction
);
19018 else if (inst
.size
> INSN_SIZE
)
19020 gas_assert (inst
.size
== (2 * INSN_SIZE
));
19021 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
19022 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
19025 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
19028 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19030 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
19031 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
19032 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
19033 inst
.relocs
[r
].type
);
19036 dwarf2_emit_insn (inst
.size
);
19040 output_it_inst (int cond
, int mask
, char * to
)
19042 unsigned long instruction
= 0xbf00;
19045 instruction
|= mask
;
19046 instruction
|= cond
<< 4;
19050 to
= frag_more (2);
19052 dwarf2_emit_insn (2);
19056 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19096 /* Subroutine of md_assemble, responsible for looking up the primary
19097 opcode from the mnemonic the user wrote. STR points to the
19098 beginning of the mnemonic.
19100 This is not simply a hash table lookup, because of conditional
19101 variants. Most instructions have conditional variants, which are
19102 expressed with a _conditional affix_ to the mnemonic. If we were
19103 to encode each conditional variant as a literal string in the opcode
19104 table, it would have approximately 20,000 entries.
19106 Most mnemonics take this affix as a suffix, and in unified syntax,
19107 'most' is upgraded to 'all'. However, in the divided syntax, some
19108 instructions take the affix as an infix, notably the s-variants of
19109 the arithmetic instructions. Of those instructions, all but six
19110 have the infix appear after the third character of the mnemonic.
19112 Accordingly, the algorithm for looking up primary opcodes given
19115 1. Look up the identifier in the opcode table.
19116 If we find a match, go to step U.
19118 2. Look up the last two characters of the identifier in the
19119 conditions table. If we find a match, look up the first N-2
19120 characters of the identifier in the opcode table. If we
19121 find a match, go to step CE.
19123 3. Look up the fourth and fifth characters of the identifier in
19124 the conditions table. If we find a match, extract those
19125 characters from the identifier, and look up the remaining
19126 characters in the opcode table. If we find a match, go
19131 U. Examine the tag field of the opcode structure, in case this is
19132 one of the six instructions with its conditional infix in an
19133 unusual place. If it is, the tag tells us where to find the
19134 infix; look it up in the conditions table and set inst.cond
19135 accordingly. Otherwise, this is an unconditional instruction.
19136 Again set inst.cond accordingly. Return the opcode structure.
19138 CE. Examine the tag field to make sure this is an instruction that
19139 should receive a conditional suffix. If it is not, fail.
19140 Otherwise, set inst.cond from the suffix we already looked up,
19141 and return the opcode structure.
19143 CM. Examine the tag field to make sure this is an instruction that
19144 should receive a conditional infix after the third character.
19145 If it is not, fail. Otherwise, undo the edits to the current
19146 line of input and proceed as for case CE. */
19148 static const struct asm_opcode
*
19149 opcode_lookup (char **str
)
19153 const struct asm_opcode
*opcode
;
19154 const struct asm_cond
*cond
;
19157 /* Scan up to the end of the mnemonic, which must end in white space,
19158 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
19159 for (base
= end
= *str
; *end
!= '\0'; end
++)
19160 if (*end
== ' ' || *end
== '.')
19166 /* Handle a possible width suffix and/or Neon type suffix. */
19171 /* The .w and .n suffixes are only valid if the unified syntax is in
19173 if (unified_syntax
&& end
[1] == 'w')
19175 else if (unified_syntax
&& end
[1] == 'n')
19180 inst
.vectype
.elems
= 0;
19182 *str
= end
+ offset
;
19184 if (end
[offset
] == '.')
19186 /* See if we have a Neon type suffix (possible in either unified or
19187 non-unified ARM syntax mode). */
19188 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
19191 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
19197 /* Look for unaffixed or special-case affixed mnemonic. */
19198 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19203 if (opcode
->tag
< OT_odd_infix_0
)
19205 inst
.cond
= COND_ALWAYS
;
19209 if (warn_on_deprecated
&& unified_syntax
)
19210 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19211 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
19212 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19215 inst
.cond
= cond
->value
;
19218 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19220 /* Cannot have a conditional suffix on a mnemonic of less than a character.
19222 if (end
- base
< 2)
19225 cond
= (const struct asm_cond
*) hash_find_n (arm_vcond_hsh
, affix
, 1);
19226 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19228 /* If this opcode can not be vector predicated then don't accept it with a
19229 vector predication code. */
19230 if (opcode
&& !opcode
->mayBeVecPred
)
19233 if (!opcode
|| !cond
)
19235 /* Cannot have a conditional suffix on a mnemonic of less than two
19237 if (end
- base
< 3)
19240 /* Look for suffixed mnemonic. */
19242 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19243 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19247 if (opcode
&& cond
)
19250 switch (opcode
->tag
)
19252 case OT_cinfix3_legacy
:
19253 /* Ignore conditional suffixes matched on infix only mnemonics. */
19257 case OT_cinfix3_deprecated
:
19258 case OT_odd_infix_unc
:
19259 if (!unified_syntax
)
19261 /* Fall through. */
19265 case OT_csuf_or_in3
:
19266 inst
.cond
= cond
->value
;
19269 case OT_unconditional
:
19270 case OT_unconditionalF
:
19272 inst
.cond
= cond
->value
;
19275 /* Delayed diagnostic. */
19276 inst
.error
= BAD_COND
;
19277 inst
.cond
= COND_ALWAYS
;
19286 /* Cannot have a usual-position infix on a mnemonic of less than
19287 six characters (five would be a suffix). */
19288 if (end
- base
< 6)
19291 /* Look for infixed mnemonic in the usual position. */
19293 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19297 memcpy (save
, affix
, 2);
19298 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
19299 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19301 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
19302 memcpy (affix
, save
, 2);
19305 && (opcode
->tag
== OT_cinfix3
19306 || opcode
->tag
== OT_cinfix3_deprecated
19307 || opcode
->tag
== OT_csuf_or_in3
19308 || opcode
->tag
== OT_cinfix3_legacy
))
19311 if (warn_on_deprecated
&& unified_syntax
19312 && (opcode
->tag
== OT_cinfix3
19313 || opcode
->tag
== OT_cinfix3_deprecated
))
19314 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19316 inst
.cond
= cond
->value
;
19323 /* This function generates an initial IT instruction, leaving its block
19324 virtually open for the new instructions. Eventually,
19325 the mask will be updated by now_pred_add_mask () each time
19326 a new instruction needs to be included in the IT block.
19327 Finally, the block is closed with close_automatic_it_block ().
19328 The block closure can be requested either from md_assemble (),
19329 a tencode (), or due to a label hook. */
19332 new_automatic_it_block (int cond
)
19334 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
19335 now_pred
.mask
= 0x18;
19336 now_pred
.cc
= cond
;
19337 now_pred
.block_length
= 1;
19338 mapping_state (MAP_THUMB
);
19339 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
19340 now_pred
.warn_deprecated
= FALSE
;
19341 now_pred
.insn_cond
= TRUE
;
19344 /* Close an automatic IT block.
19345 See comments in new_automatic_it_block (). */
19348 close_automatic_it_block (void)
19350 now_pred
.mask
= 0x10;
19351 now_pred
.block_length
= 0;
19354 /* Update the mask of the current automatically-generated IT
19355 instruction. See comments in new_automatic_it_block (). */
19358 now_pred_add_mask (int cond
)
19360 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19361 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19362 | ((bitvalue) << (nbit)))
19363 const int resulting_bit
= (cond
& 1);
19365 now_pred
.mask
&= 0xf;
19366 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19368 (5 - now_pred
.block_length
));
19369 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19371 ((5 - now_pred
.block_length
) - 1));
19372 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
19375 #undef SET_BIT_VALUE
19378 /* The IT blocks handling machinery is accessed through the these functions:
19379 it_fsm_pre_encode () from md_assemble ()
19380 set_pred_insn_type () optional, from the tencode functions
19381 set_pred_insn_type_last () ditto
19382 in_pred_block () ditto
19383 it_fsm_post_encode () from md_assemble ()
19384 force_automatic_it_block_close () from label handling functions
19387 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19388 initializing the IT insn type with a generic initial value depending
19389 on the inst.condition.
19390 2) During the tencode function, two things may happen:
19391 a) The tencode function overrides the IT insn type by
19392 calling either set_pred_insn_type (type) or
19393 set_pred_insn_type_last ().
19394 b) The tencode function queries the IT block state by
19395 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19397 Both set_pred_insn_type and in_pred_block run the internal FSM state
19398 handling function (handle_pred_state), because: a) setting the IT insn
19399 type may incur in an invalid state (exiting the function),
19400 and b) querying the state requires the FSM to be updated.
19401 Specifically we want to avoid creating an IT block for conditional
19402 branches, so it_fsm_pre_encode is actually a guess and we can't
19403 determine whether an IT block is required until the tencode () routine
19404 has decided what type of instruction this actually it.
19405 Because of this, if set_pred_insn_type and in_pred_block have to be
19406 used, set_pred_insn_type has to be called first.
19408 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19409 that determines the insn IT type depending on the inst.cond code.
19410 When a tencode () routine encodes an instruction that can be
19411 either outside an IT block, or, in the case of being inside, has to be
19412 the last one, set_pred_insn_type_last () will determine the proper
19413 IT instruction type based on the inst.cond code. Otherwise,
19414 set_pred_insn_type can be called for overriding that logic or
19415 for covering other cases.
19417 Calling handle_pred_state () may not transition the IT block state to
19418 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19419 still queried. Instead, if the FSM determines that the state should
19420 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19421 after the tencode () function: that's what it_fsm_post_encode () does.
19423 Since in_pred_block () calls the state handling function to get an
19424 updated state, an error may occur (due to invalid insns combination).
19425 In that case, inst.error is set.
19426 Therefore, inst.error has to be checked after the execution of
19427 the tencode () routine.
19429 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19430 any pending state change (if any) that didn't take place in
19431 handle_pred_state () as explained above. */
19434 it_fsm_pre_encode (void)
19436 if (inst
.cond
!= COND_ALWAYS
)
19437 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19439 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
19441 now_pred
.state_handled
= 0;
19444 /* IT state FSM handling function. */
19445 /* MVE instructions and non-MVE instructions are handled differently because of
19446 the introduction of VPT blocks.
19447 Specifications say that any non-MVE instruction inside a VPT block is
19448 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19449 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19450 few exceptions we have MVE_UNPREDICABLE_INSN.
19451 The error messages provided depending on the different combinations possible
19452 are described in the cases below:
19453 For 'most' MVE instructions:
19454 1) In an IT block, with an IT code: syntax error
19455 2) In an IT block, with a VPT code: error: must be in a VPT block
19456 3) In an IT block, with no code: warning: UNPREDICTABLE
19457 4) In a VPT block, with an IT code: syntax error
19458 5) In a VPT block, with a VPT code: OK!
19459 6) In a VPT block, with no code: error: missing code
19460 7) Outside a pred block, with an IT code: error: syntax error
19461 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19462 9) Outside a pred block, with no code: OK!
19463 For non-MVE instructions:
19464 10) In an IT block, with an IT code: OK!
19465 11) In an IT block, with a VPT code: syntax error
19466 12) In an IT block, with no code: error: missing code
19467 13) In a VPT block, with an IT code: error: should be in an IT block
19468 14) In a VPT block, with a VPT code: syntax error
19469 15) In a VPT block, with no code: UNPREDICTABLE
19470 16) Outside a pred block, with an IT code: error: should be in an IT block
19471 17) Outside a pred block, with a VPT code: syntax error
19472 18) Outside a pred block, with no code: OK!
19477 handle_pred_state (void)
19479 now_pred
.state_handled
= 1;
19480 now_pred
.insn_cond
= FALSE
;
19482 switch (now_pred
.state
)
19484 case OUTSIDE_PRED_BLOCK
:
19485 switch (inst
.pred_insn_type
)
19487 case MVE_UNPREDICABLE_INSN
:
19488 case MVE_OUTSIDE_PRED_INSN
:
19489 if (inst
.cond
< COND_ALWAYS
)
19491 /* Case 7: Outside a pred block, with an IT code: error: syntax
19493 inst
.error
= BAD_SYNTAX
;
19496 /* Case 9: Outside a pred block, with no code: OK! */
19498 case OUTSIDE_PRED_INSN
:
19499 if (inst
.cond
> COND_ALWAYS
)
19501 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19503 inst
.error
= BAD_SYNTAX
;
19506 /* Case 18: Outside a pred block, with no code: OK! */
19509 case INSIDE_VPT_INSN
:
19510 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19512 inst
.error
= BAD_OUT_VPT
;
19515 case INSIDE_IT_INSN
:
19516 case INSIDE_IT_LAST_INSN
:
19517 if (inst
.cond
< COND_ALWAYS
)
19519 /* Case 16: Outside a pred block, with an IT code: error: should
19520 be in an IT block. */
19521 if (thumb_mode
== 0)
19524 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
19525 as_tsktsk (_("Warning: conditional outside an IT block"\
19530 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
19531 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
19533 /* Automatically generate the IT instruction. */
19534 new_automatic_it_block (inst
.cond
);
19535 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
19536 close_automatic_it_block ();
19540 inst
.error
= BAD_OUT_IT
;
19546 else if (inst
.cond
> COND_ALWAYS
)
19548 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19550 inst
.error
= BAD_SYNTAX
;
19555 case IF_INSIDE_IT_LAST_INSN
:
19556 case NEUTRAL_IT_INSN
:
19560 if (inst
.cond
!= COND_ALWAYS
)
19561 first_error (BAD_SYNTAX
);
19562 now_pred
.state
= MANUAL_PRED_BLOCK
;
19563 now_pred
.block_length
= 0;
19564 now_pred
.type
= VECTOR_PRED
;
19568 now_pred
.state
= MANUAL_PRED_BLOCK
;
19569 now_pred
.block_length
= 0;
19570 now_pred
.type
= SCALAR_PRED
;
19575 case AUTOMATIC_PRED_BLOCK
:
19576 /* Three things may happen now:
19577 a) We should increment current it block size;
19578 b) We should close current it block (closing insn or 4 insns);
19579 c) We should close current it block and start a new one (due
19580 to incompatible conditions or
19581 4 insns-length block reached). */
19583 switch (inst
.pred_insn_type
)
19585 case INSIDE_VPT_INSN
:
19587 case MVE_UNPREDICABLE_INSN
:
19588 case MVE_OUTSIDE_PRED_INSN
:
19590 case OUTSIDE_PRED_INSN
:
19591 /* The closure of the block shall happen immediately,
19592 so any in_pred_block () call reports the block as closed. */
19593 force_automatic_it_block_close ();
19596 case INSIDE_IT_INSN
:
19597 case INSIDE_IT_LAST_INSN
:
19598 case IF_INSIDE_IT_LAST_INSN
:
19599 now_pred
.block_length
++;
19601 if (now_pred
.block_length
> 4
19602 || !now_pred_compatible (inst
.cond
))
19604 force_automatic_it_block_close ();
19605 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
19606 new_automatic_it_block (inst
.cond
);
19610 now_pred
.insn_cond
= TRUE
;
19611 now_pred_add_mask (inst
.cond
);
19614 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
19615 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
19616 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
19617 close_automatic_it_block ();
19620 case NEUTRAL_IT_INSN
:
19621 now_pred
.block_length
++;
19622 now_pred
.insn_cond
= TRUE
;
19624 if (now_pred
.block_length
> 4)
19625 force_automatic_it_block_close ();
19627 now_pred_add_mask (now_pred
.cc
& 1);
19631 close_automatic_it_block ();
19632 now_pred
.state
= MANUAL_PRED_BLOCK
;
19637 case MANUAL_PRED_BLOCK
:
19640 if (now_pred
.type
== SCALAR_PRED
)
19642 /* Check conditional suffixes. */
19643 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
19644 now_pred
.mask
<<= 1;
19645 now_pred
.mask
&= 0x1f;
19646 is_last
= (now_pred
.mask
== 0x10);
19650 now_pred
.cc
^= (now_pred
.mask
>> 4);
19651 cond
= now_pred
.cc
+ 0xf;
19652 now_pred
.mask
<<= 1;
19653 now_pred
.mask
&= 0x1f;
19654 is_last
= now_pred
.mask
== 0x10;
19656 now_pred
.insn_cond
= TRUE
;
19658 switch (inst
.pred_insn_type
)
19660 case OUTSIDE_PRED_INSN
:
19661 if (now_pred
.type
== SCALAR_PRED
)
19663 if (inst
.cond
== COND_ALWAYS
)
19665 /* Case 12: In an IT block, with no code: error: missing
19667 inst
.error
= BAD_NOT_IT
;
19670 else if (inst
.cond
> COND_ALWAYS
)
19672 /* Case 11: In an IT block, with a VPT code: syntax error.
19674 inst
.error
= BAD_SYNTAX
;
19677 else if (thumb_mode
)
19679 /* This is for some special cases where a non-MVE
19680 instruction is not allowed in an IT block, such as cbz,
19681 but are put into one with a condition code.
19682 You could argue this should be a syntax error, but we
19683 gave the 'not allowed in IT block' diagnostic in the
19684 past so we will keep doing so. */
19685 inst
.error
= BAD_NOT_IT
;
19692 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19693 as_tsktsk (MVE_NOT_VPT
);
19696 case MVE_OUTSIDE_PRED_INSN
:
19697 if (now_pred
.type
== SCALAR_PRED
)
19699 if (inst
.cond
== COND_ALWAYS
)
19701 /* Case 3: In an IT block, with no code: warning:
19703 as_tsktsk (MVE_NOT_IT
);
19706 else if (inst
.cond
< COND_ALWAYS
)
19708 /* Case 1: In an IT block, with an IT code: syntax error.
19710 inst
.error
= BAD_SYNTAX
;
19718 if (inst
.cond
< COND_ALWAYS
)
19720 /* Case 4: In a VPT block, with an IT code: syntax error.
19722 inst
.error
= BAD_SYNTAX
;
19725 else if (inst
.cond
== COND_ALWAYS
)
19727 /* Case 6: In a VPT block, with no code: error: missing
19729 inst
.error
= BAD_NOT_VPT
;
19737 case MVE_UNPREDICABLE_INSN
:
19738 as_tsktsk (now_pred
.type
== SCALAR_PRED
? MVE_NOT_IT
: MVE_NOT_VPT
);
19740 case INSIDE_IT_INSN
:
19741 if (inst
.cond
> COND_ALWAYS
)
19743 /* Case 11: In an IT block, with a VPT code: syntax error. */
19744 /* Case 14: In a VPT block, with a VPT code: syntax error. */
19745 inst
.error
= BAD_SYNTAX
;
19748 else if (now_pred
.type
== SCALAR_PRED
)
19750 /* Case 10: In an IT block, with an IT code: OK! */
19751 if (cond
!= inst
.cond
)
19753 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
19760 /* Case 13: In a VPT block, with an IT code: error: should be
19762 inst
.error
= BAD_OUT_IT
;
19767 case INSIDE_VPT_INSN
:
19768 if (now_pred
.type
== SCALAR_PRED
)
19770 /* Case 2: In an IT block, with a VPT code: error: must be in a
19772 inst
.error
= BAD_OUT_VPT
;
19775 /* Case 5: In a VPT block, with a VPT code: OK! */
19776 else if (cond
!= inst
.cond
)
19778 inst
.error
= BAD_VPT_COND
;
19782 case INSIDE_IT_LAST_INSN
:
19783 case IF_INSIDE_IT_LAST_INSN
:
19784 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
19786 /* Case 4: In a VPT block, with an IT code: syntax error. */
19787 /* Case 11: In an IT block, with a VPT code: syntax error. */
19788 inst
.error
= BAD_SYNTAX
;
19791 else if (cond
!= inst
.cond
)
19793 inst
.error
= BAD_IT_COND
;
19798 inst
.error
= BAD_BRANCH
;
19803 case NEUTRAL_IT_INSN
:
19804 /* The BKPT instruction is unconditional even in a IT or VPT
19809 if (now_pred
.type
== SCALAR_PRED
)
19811 inst
.error
= BAD_IT_IT
;
19814 /* fall through. */
19816 if (inst
.cond
== COND_ALWAYS
)
19818 /* Executing a VPT/VPST instruction inside an IT block or a
19819 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
19821 if (now_pred
.type
== SCALAR_PRED
)
19822 as_tsktsk (MVE_NOT_IT
);
19824 as_tsktsk (MVE_NOT_VPT
);
19829 /* VPT/VPST do not accept condition codes. */
19830 inst
.error
= BAD_SYNTAX
;
19841 struct depr_insn_mask
19843 unsigned long pattern
;
19844 unsigned long mask
;
19845 const char* description
;
19848 /* List of 16-bit instruction patterns deprecated in an IT block in
19850 static const struct depr_insn_mask depr_it_insns
[] = {
19851 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
19852 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
19853 { 0xa000, 0xb800, N_("ADR") },
19854 { 0x4800, 0xf800, N_("Literal loads") },
19855 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
19856 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
19857 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
19858 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
19859 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
19864 it_fsm_post_encode (void)
19868 if (!now_pred
.state_handled
)
19869 handle_pred_state ();
19871 if (now_pred
.insn_cond
19872 && !now_pred
.warn_deprecated
19873 && warn_on_deprecated
19874 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
19875 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
19877 if (inst
.instruction
>= 0x10000)
19879 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19880 "performance deprecated in ARMv8-A and ARMv8-R"));
19881 now_pred
.warn_deprecated
= TRUE
;
19885 const struct depr_insn_mask
*p
= depr_it_insns
;
19887 while (p
->mask
!= 0)
19889 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
19891 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19892 "instructions of the following class are "
19893 "performance deprecated in ARMv8-A and "
19894 "ARMv8-R: %s"), p
->description
);
19895 now_pred
.warn_deprecated
= TRUE
;
19903 if (now_pred
.block_length
> 1)
19905 as_tsktsk (_("IT blocks containing more than one conditional "
19906 "instruction are performance deprecated in ARMv8-A and "
19908 now_pred
.warn_deprecated
= TRUE
;
19912 is_last
= (now_pred
.mask
== 0x10);
19915 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
19921 force_automatic_it_block_close (void)
19923 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
19925 close_automatic_it_block ();
19926 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
19932 in_pred_block (void)
19934 if (!now_pred
.state_handled
)
19935 handle_pred_state ();
19937 return now_pred
.state
!= OUTSIDE_PRED_BLOCK
;
19940 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19941 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19942 here, hence the "known" in the function name. */
19945 known_t32_only_insn (const struct asm_opcode
*opcode
)
19947 /* Original Thumb-1 wide instruction. */
19948 if (opcode
->tencode
== do_t_blx
19949 || opcode
->tencode
== do_t_branch23
19950 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
19951 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
19954 /* Wide-only instruction added to ARMv8-M Baseline. */
19955 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
19956 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
19957 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
19958 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
19964 /* Whether wide instruction variant can be used if available for a valid OPCODE
19968 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
19970 if (known_t32_only_insn (opcode
))
19973 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19974 of variant T3 of B.W is checked in do_t_branch. */
19975 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19976 && opcode
->tencode
== do_t_branch
)
19979 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19980 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19981 && opcode
->tencode
== do_t_mov_cmp
19982 /* Make sure CMP instruction is not affected. */
19983 && opcode
->aencode
== do_mov
)
19986 /* Wide instruction variants of all instructions with narrow *and* wide
19987 variants become available with ARMv6t2. Other opcodes are either
19988 narrow-only or wide-only and are thus available if OPCODE is valid. */
19989 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
19992 /* OPCODE with narrow only instruction variant or wide variant not
19998 md_assemble (char *str
)
20001 const struct asm_opcode
* opcode
;
20003 /* Align the previous label if needed. */
20004 if (last_label_seen
!= NULL
)
20006 symbol_set_frag (last_label_seen
, frag_now
);
20007 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
20008 S_SET_SEGMENT (last_label_seen
, now_seg
);
20011 memset (&inst
, '\0', sizeof (inst
));
20013 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
20014 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
20016 opcode
= opcode_lookup (&p
);
20019 /* It wasn't an instruction, but it might be a register alias of
20020 the form alias .req reg, or a Neon .dn/.qn directive. */
20021 if (! create_register_alias (str
, p
)
20022 && ! create_neon_reg_alias (str
, p
))
20023 as_bad (_("bad instruction `%s'"), str
);
20028 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
20029 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
20031 /* The value which unconditional instructions should have in place of the
20032 condition field. */
20033 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
20037 arm_feature_set variant
;
20039 variant
= cpu_variant
;
20040 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
20041 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
20042 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
20043 /* Check that this instruction is supported for this CPU. */
20044 if (!opcode
->tvariant
20045 || (thumb_mode
== 1
20046 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
20048 if (opcode
->tencode
== do_t_swi
)
20049 as_bad (_("SVC is not permitted on this architecture"));
20051 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
20054 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
20055 && opcode
->tencode
!= do_t_branch
)
20057 as_bad (_("Thumb does not support conditional execution"));
20061 /* Two things are addressed here:
20062 1) Implicit require narrow instructions on Thumb-1.
20063 This avoids relaxation accidentally introducing Thumb-2
20065 2) Reject wide instructions in non Thumb-2 cores.
20067 Only instructions with narrow and wide variants need to be handled
20068 but selecting all non wide-only instructions is easier. */
20069 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
20070 && !t32_insn_ok (variant
, opcode
))
20072 if (inst
.size_req
== 0)
20074 else if (inst
.size_req
== 4)
20076 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
20077 as_bad (_("selected processor does not support 32bit wide "
20078 "variant of instruction `%s'"), str
);
20080 as_bad (_("selected processor does not support `%s' in "
20081 "Thumb-2 mode"), str
);
20086 inst
.instruction
= opcode
->tvalue
;
20088 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
20090 /* Prepare the pred_insn_type for those encodings that don't set
20092 it_fsm_pre_encode ();
20094 opcode
->tencode ();
20096 it_fsm_post_encode ();
20099 if (!(inst
.error
|| inst
.relax
))
20101 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
20102 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
20103 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
20105 as_bad (_("cannot honor width suffix -- `%s'"), str
);
20110 /* Something has gone badly wrong if we try to relax a fixed size
20112 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
20114 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20115 *opcode
->tvariant
);
20116 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
20117 set those bits when Thumb-2 32-bit instructions are seen. The impact
20118 of relaxable instructions will be considered later after we finish all
20120 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
20121 variant
= arm_arch_none
;
20123 variant
= cpu_variant
;
20124 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
20125 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20128 check_neon_suffixes
;
20132 mapping_state (MAP_THUMB
);
20135 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
20139 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
20140 is_bx
= (opcode
->aencode
== do_bx
);
20142 /* Check that this instruction is supported for this CPU. */
20143 if (!(is_bx
&& fix_v4bx
)
20144 && !(opcode
->avariant
&&
20145 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
20147 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
20152 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
20156 inst
.instruction
= opcode
->avalue
;
20157 if (opcode
->tag
== OT_unconditionalF
)
20158 inst
.instruction
|= 0xFU
<< 28;
20160 inst
.instruction
|= inst
.cond
<< 28;
20161 inst
.size
= INSN_SIZE
;
20162 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
20164 it_fsm_pre_encode ();
20165 opcode
->aencode ();
20166 it_fsm_post_encode ();
20168 /* Arm mode bx is marked as both v4T and v5 because it's still required
20169 on a hypothetical non-thumb v5 core. */
20171 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
20173 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
20174 *opcode
->avariant
);
20176 check_neon_suffixes
;
20180 mapping_state (MAP_ARM
);
20185 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
20193 check_pred_blocks_finished (void)
20198 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
20199 if (seg_info (sect
)->tc_segment_info_data
.current_pred
.state
20200 == MANUAL_PRED_BLOCK
)
20202 if (now_pred
.type
== SCALAR_PRED
)
20203 as_warn (_("section '%s' finished with an open IT block."),
20206 as_warn (_("section '%s' finished with an open VPT/VPST block."),
20210 if (now_pred
.state
== MANUAL_PRED_BLOCK
)
20212 if (now_pred
.type
== SCALAR_PRED
)
20213 as_warn (_("file finished with an open IT block."));
20215 as_warn (_("file finished with an open VPT/VPST block."));
20220 /* Various frobbings of labels and their addresses. */
20223 arm_start_line_hook (void)
20225 last_label_seen
= NULL
;
20229 arm_frob_label (symbolS
* sym
)
20231 last_label_seen
= sym
;
20233 ARM_SET_THUMB (sym
, thumb_mode
);
20235 #if defined OBJ_COFF || defined OBJ_ELF
20236 ARM_SET_INTERWORK (sym
, support_interwork
);
20239 force_automatic_it_block_close ();
20241 /* Note - do not allow local symbols (.Lxxx) to be labelled
20242 as Thumb functions. This is because these labels, whilst
20243 they exist inside Thumb code, are not the entry points for
20244 possible ARM->Thumb calls. Also, these labels can be used
20245 as part of a computed goto or switch statement. eg gcc
20246 can generate code that looks like this:
20248 ldr r2, [pc, .Laaa]
20258 The first instruction loads the address of the jump table.
20259 The second instruction converts a table index into a byte offset.
20260 The third instruction gets the jump address out of the table.
20261 The fourth instruction performs the jump.
20263 If the address stored at .Laaa is that of a symbol which has the
20264 Thumb_Func bit set, then the linker will arrange for this address
20265 to have the bottom bit set, which in turn would mean that the
20266 address computation performed by the third instruction would end
20267 up with the bottom bit set. Since the ARM is capable of unaligned
20268 word loads, the instruction would then load the incorrect address
20269 out of the jump table, and chaos would ensue. */
20270 if (label_is_thumb_function_name
20271 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
20272 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
20274 /* When the address of a Thumb function is taken the bottom
20275 bit of that address should be set. This will allow
20276 interworking between Arm and Thumb functions to work
20279 THUMB_SET_FUNC (sym
, 1);
20281 label_is_thumb_function_name
= FALSE
;
20284 dwarf2_emit_label (sym
);
20288 arm_data_in_code (void)
20290 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
20292 *input_line_pointer
= '/';
20293 input_line_pointer
+= 5;
20294 *input_line_pointer
= 0;
20302 arm_canonicalize_symbol_name (char * name
)
20306 if (thumb_mode
&& (len
= strlen (name
)) > 5
20307 && streq (name
+ len
- 5, "/data"))
20308 *(name
+ len
- 5) = 0;
20313 /* Table of all register names defined by default. The user can
20314 define additional names with .req. Note that all register names
20315 should appear in both upper and lowercase variants. Some registers
20316 also have mixed-case names. */
20318 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
20319 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
20320 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
20321 #define REGSET(p,t) \
20322 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
20323 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
20324 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
20325 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
20326 #define REGSETH(p,t) \
20327 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
20328 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
20329 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
20330 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
20331 #define REGSET2(p,t) \
20332 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
20333 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
20334 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
20335 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
20336 #define SPLRBANK(base,bank,t) \
20337 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
20338 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
20339 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
20340 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
20341 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
20342 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20344 static const struct reg_entry reg_names
[] =
20346 /* ARM integer registers. */
20347 REGSET(r
, RN
), REGSET(R
, RN
),
20349 /* ATPCS synonyms. */
20350 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
20351 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
20352 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
20354 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
20355 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
20356 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
20358 /* Well-known aliases. */
20359 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
20360 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
20362 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
20363 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
20365 /* Coprocessor numbers. */
20366 REGSET(p
, CP
), REGSET(P
, CP
),
20368 /* Coprocessor register numbers. The "cr" variants are for backward
20370 REGSET(c
, CN
), REGSET(C
, CN
),
20371 REGSET(cr
, CN
), REGSET(CR
, CN
),
20373 /* ARM banked registers. */
20374 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
20375 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
20376 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
20377 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
20378 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
20379 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
20380 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
20382 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
20383 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
20384 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
20385 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
20386 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
20387 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
20388 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
20389 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
20391 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
20392 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
20393 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
20394 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
20395 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
20396 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
20397 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
20398 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20399 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20401 /* FPA registers. */
20402 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
20403 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
20405 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
20406 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
20408 /* VFP SP registers. */
20409 REGSET(s
,VFS
), REGSET(S
,VFS
),
20410 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
20412 /* VFP DP Registers. */
20413 REGSET(d
,VFD
), REGSET(D
,VFD
),
20414 /* Extra Neon DP registers. */
20415 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
20417 /* Neon QP registers. */
20418 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
20420 /* VFP control registers. */
20421 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
20422 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
20423 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
20424 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
20425 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
20426 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
20427 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
20429 /* Maverick DSP coprocessor registers. */
20430 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
20431 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
20433 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
20434 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
20435 REGDEF(dspsc
,0,DSPSC
),
20437 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
20438 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
20439 REGDEF(DSPSC
,0,DSPSC
),
20441 /* iWMMXt data registers - p0, c0-15. */
20442 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
20444 /* iWMMXt control registers - p1, c0-3. */
20445 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
20446 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
20447 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
20448 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
20450 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
20451 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
20452 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
20453 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
20454 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
20456 /* XScale accumulator registers. */
20457 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
20463 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
20464 within psr_required_here. */
20465 static const struct asm_psr psrs
[] =
20467 /* Backward compatibility notation. Note that "all" is no longer
20468 truly all possible PSR bits. */
20469 {"all", PSR_c
| PSR_f
},
20473 /* Individual flags. */
20479 /* Combinations of flags. */
20480 {"fs", PSR_f
| PSR_s
},
20481 {"fx", PSR_f
| PSR_x
},
20482 {"fc", PSR_f
| PSR_c
},
20483 {"sf", PSR_s
| PSR_f
},
20484 {"sx", PSR_s
| PSR_x
},
20485 {"sc", PSR_s
| PSR_c
},
20486 {"xf", PSR_x
| PSR_f
},
20487 {"xs", PSR_x
| PSR_s
},
20488 {"xc", PSR_x
| PSR_c
},
20489 {"cf", PSR_c
| PSR_f
},
20490 {"cs", PSR_c
| PSR_s
},
20491 {"cx", PSR_c
| PSR_x
},
20492 {"fsx", PSR_f
| PSR_s
| PSR_x
},
20493 {"fsc", PSR_f
| PSR_s
| PSR_c
},
20494 {"fxs", PSR_f
| PSR_x
| PSR_s
},
20495 {"fxc", PSR_f
| PSR_x
| PSR_c
},
20496 {"fcs", PSR_f
| PSR_c
| PSR_s
},
20497 {"fcx", PSR_f
| PSR_c
| PSR_x
},
20498 {"sfx", PSR_s
| PSR_f
| PSR_x
},
20499 {"sfc", PSR_s
| PSR_f
| PSR_c
},
20500 {"sxf", PSR_s
| PSR_x
| PSR_f
},
20501 {"sxc", PSR_s
| PSR_x
| PSR_c
},
20502 {"scf", PSR_s
| PSR_c
| PSR_f
},
20503 {"scx", PSR_s
| PSR_c
| PSR_x
},
20504 {"xfs", PSR_x
| PSR_f
| PSR_s
},
20505 {"xfc", PSR_x
| PSR_f
| PSR_c
},
20506 {"xsf", PSR_x
| PSR_s
| PSR_f
},
20507 {"xsc", PSR_x
| PSR_s
| PSR_c
},
20508 {"xcf", PSR_x
| PSR_c
| PSR_f
},
20509 {"xcs", PSR_x
| PSR_c
| PSR_s
},
20510 {"cfs", PSR_c
| PSR_f
| PSR_s
},
20511 {"cfx", PSR_c
| PSR_f
| PSR_x
},
20512 {"csf", PSR_c
| PSR_s
| PSR_f
},
20513 {"csx", PSR_c
| PSR_s
| PSR_x
},
20514 {"cxf", PSR_c
| PSR_x
| PSR_f
},
20515 {"cxs", PSR_c
| PSR_x
| PSR_s
},
20516 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
20517 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
20518 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
20519 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
20520 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
20521 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
20522 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
20523 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
20524 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
20525 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
20526 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
20527 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
20528 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
20529 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
20530 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
20531 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
20532 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
20533 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
20534 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
20535 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
20536 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
20537 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
20538 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
20539 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
20542 /* Table of V7M psr names. */
20543 static const struct asm_psr v7m_psrs
[] =
20545 {"apsr", 0x0 }, {"APSR", 0x0 },
20546 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20547 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20548 {"psr", 0x3 }, {"PSR", 0x3 },
20549 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20550 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20551 {"epsr", 0x6 }, {"EPSR", 0x6 },
20552 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20553 {"msp", 0x8 }, {"MSP", 0x8 },
20554 {"psp", 0x9 }, {"PSP", 0x9 },
20555 {"msplim", 0xa }, {"MSPLIM", 0xa },
20556 {"psplim", 0xb }, {"PSPLIM", 0xb },
20557 {"primask", 0x10}, {"PRIMASK", 0x10},
20558 {"basepri", 0x11}, {"BASEPRI", 0x11},
20559 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20560 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20561 {"control", 0x14}, {"CONTROL", 0x14},
20562 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20563 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20564 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20565 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20566 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20567 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20568 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20569 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20570 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
20573 /* Table of all shift-in-operand names. */
20574 static const struct asm_shift_name shift_names
[] =
20576 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
20577 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
20578 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
20579 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
20580 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
20581 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
20584 /* Table of all explicit relocation names. */
20586 static struct reloc_entry reloc_names
[] =
20588 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
20589 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
20590 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
20591 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
20592 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
20593 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
20594 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
20595 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
20596 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
20597 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
20598 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
20599 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
20600 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
20601 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
20602 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
20603 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
20604 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
20605 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
20606 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
20607 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
20608 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
20609 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
20610 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
20611 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
20612 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
20613 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
20614 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
20618 /* Table of all conditional affixes. */
20619 static const struct asm_cond conds
[] =
20623 {"cs", 0x2}, {"hs", 0x2},
20624 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
20637 static const struct asm_cond vconds
[] =
20643 #define UL_BARRIER(L,U,CODE,FEAT) \
20644 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
20645 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
20647 static struct asm_barrier_opt barrier_opt_names
[] =
20649 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
20650 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
20651 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
20652 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
20653 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
20654 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
20655 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
20656 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
20657 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
20658 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
20659 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
20660 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
20661 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
20662 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
20663 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
20664 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
20669 /* Table of ARM-format instructions. */
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   (e.g. _3).  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional
   suffix.  The ARM opcode OP gets a 0x prefix pasted on; TOP (the Thumb
   opcode) is passed through verbatim.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
/* Second most common sort of mnemonic: has a Thumb variant, takes a
   conditional infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3, but the infixed form is deprecated (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* Numeric Thumb opcode (TC3*) and T_MNEM enumerator (tC3*) variants,
   mirroring the TCE/tCE split above.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* ARM-only variants of all the above.  No Thumb opcode, no Thumb encoder.
   Note: CE takes a string literal mnemonic; C3 stringizes its argument.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
20753 /* Thumb-only variants of TCE and TUE. */
20754 #define ToC(mnem, top, nops, ops, te) \
20755 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
20758 #define ToU(mnem, top, nops, ops, te) \
20759 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
20762 /* T_MNEM_xyz enumerator variants of ToC. */
20763 #define toC(mnem, top, nops, ops, te) \
20764 { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
20767 /* T_MNEM_xyz enumerator variants of ToU. */
20768 #define toU(mnem, top, nops, ops, te) \
20769 { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the Thumb
   opcode is the ARM opcode with 0xe pasted in front (the always-condition
   encoding), and both modes share one encoder function.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
/* Helper for CM below: builds one table entry whose mnemonic is the
   concatenation M1 <infix-condition M2> M3.  An empty M2 (sizeof (#M2) == 1,
   i.e. just the NUL) selects the unconditional odd-infix tag; otherwise the
   infix position is derived from the length of M1.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one entry per condition code (plus the bare, unconditional
   spelling) for a legacy odd-infix mnemonic.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional mnemonics: UE keeps condition field 0xE semantics
   (OT_unconditional), UF bears 0xF (OT_unconditionalF).  Mnemonic argument
   is stringized.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  TAG selects the suffix parsing style; MVE_P marks whether the
   entry may be MVE-predicated.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
/* Neon insn with conditional suffix for the ARM version, overloaded types:
   the opcode fields are N_MNEM_xyz enumerators resolved via neon_enc_tab.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
/* MVE-predicated entry (mve_p field == 1) with conditional-suffix-F parsing,
   indirecting through an M_MNEM_xyz enumerator for overloaded opcodes.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)
/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
20887 /* ToC but for potentially MVE predicated instructions. */
20888 #define mToC(mnem, top, nops, ops, te) \
20889 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
20901 static const struct asm_opcode insns
[] =
20903 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20904 #define THUMB_VARIANT & arm_ext_v4t
20905 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20906 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20907 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20908 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20909 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20910 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20911 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20912 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20913 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20914 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20915 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20916 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20917 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20918 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20919 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20920 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20922 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20923 for setting PSR flag bits. They are obsolete in V6 and do not
20924 have Thumb equivalents. */
20925 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20926 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20927 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20928 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20929 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20930 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20931 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20932 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20933 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20935 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20936 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20937 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20938 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20940 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20941 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20942 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20944 OP_ADDRGLDR
),ldst
, t_ldst
),
20945 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20947 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20948 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20949 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20950 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20951 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20952 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20954 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20955 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20958 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20959 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20960 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20961 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20963 /* Thumb-compatibility pseudo ops. */
20964 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20965 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20966 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20967 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20968 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20969 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20970 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20971 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20972 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20973 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20974 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20975 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20977 /* These may simplify to neg. */
20978 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20979 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20981 #undef THUMB_VARIANT
20982 #define THUMB_VARIANT & arm_ext_os
20984 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20985 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20987 #undef THUMB_VARIANT
20988 #define THUMB_VARIANT & arm_ext_v6
20990 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20992 /* V1 instructions with no Thumb analogue prior to V6T2. */
20993 #undef THUMB_VARIANT
20994 #define THUMB_VARIANT & arm_ext_v6t2
20996 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20997 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20998 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
21000 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21001 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21002 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
21003 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21005 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21006 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21008 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21009 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21011 /* V1 instructions with no Thumb analogue at all. */
21012 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
21013 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
21015 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21016 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21017 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21018 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21019 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21020 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21021 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21022 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21025 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21026 #undef THUMB_VARIANT
21027 #define THUMB_VARIANT & arm_ext_v4t
21029 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21030 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21032 #undef THUMB_VARIANT
21033 #define THUMB_VARIANT & arm_ext_v6t2
21035 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21036 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
21038 /* Generic coprocessor instructions. */
21039 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21040 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21041 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21042 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21043 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21044 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21045 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21048 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21050 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21051 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21054 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21055 #undef THUMB_VARIANT
21056 #define THUMB_VARIANT & arm_ext_msr
21058 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
21059 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
21062 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21063 #undef THUMB_VARIANT
21064 #define THUMB_VARIANT & arm_ext_v6t2
21066 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21067 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21068 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21069 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21070 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21071 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21072 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21073 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21076 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21077 #undef THUMB_VARIANT
21078 #define THUMB_VARIANT & arm_ext_v4t
21080 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21081 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21082 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21083 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21084 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21085 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21088 #define ARM_VARIANT & arm_ext_v4t_5
21090 /* ARM Architecture 4T. */
21091 /* Note: bx (and blx) are required on V5, even if the processor does
21092 not support Thumb. */
21093 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
21096 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21097 #undef THUMB_VARIANT
21098 #define THUMB_VARIANT & arm_ext_v5t
21100 /* Note: blx has 2 variants; the .value coded here is for
21101 BLX(2). Only this variant has conditional execution. */
21102 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
21103 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
21105 #undef THUMB_VARIANT
21106 #define THUMB_VARIANT & arm_ext_v6t2
21108 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
21109 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21110 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21111 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21112 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21113 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21114 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21115 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21118 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21119 #undef THUMB_VARIANT
21120 #define THUMB_VARIANT & arm_ext_v5exp
21122 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21123 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21124 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21125 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21127 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21128 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21130 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21131 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21132 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21133 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21135 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21136 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21137 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21138 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21140 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21141 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21143 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21144 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21145 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21146 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21149 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21150 #undef THUMB_VARIANT
21151 #define THUMB_VARIANT & arm_ext_v6t2
21153 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
21154 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
21156 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
21157 ADDRGLDRS
), ldrd
, t_ldstd
),
21159 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21160 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21163 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21165 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
21168 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21169 #undef THUMB_VARIANT
21170 #define THUMB_VARIANT & arm_ext_v6
21172 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21173 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21174 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21175 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21176 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21177 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21178 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21179 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21180 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21181 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
21183 #undef THUMB_VARIANT
21184 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21186 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
21187 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21189 #undef THUMB_VARIANT
21190 #define THUMB_VARIANT & arm_ext_v6t2
21192 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21193 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21195 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
21196 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
21198 /* ARM V6 not included in V7M. */
21199 #undef THUMB_VARIANT
21200 #define THUMB_VARIANT & arm_ext_v6_notm
21201 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21202 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21203 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
21204 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
21205 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21206 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21207 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
21208 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21209 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
21210 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21211 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21212 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21213 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21214 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21215 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
21216 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
21217 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21218 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21219 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
21221 /* ARM V6 not included in V7M (eg. integer SIMD). */
21222 #undef THUMB_VARIANT
21223 #define THUMB_VARIANT & arm_ext_v6_dsp
21224 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
21225 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
21226 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21227 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21228 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21229 /* Old name for QASX. */
21230 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21231 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21232 /* Old name for QSAX. */
21233 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21234 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21235 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21236 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21237 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21238 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21239 /* Old name for SASX. */
21240 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21241 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21242 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21243 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21244 /* Old name for SHASX. */
21245 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21246 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21247 /* Old name for SHSAX. */
21248 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21249 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21250 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21251 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21252 /* Old name for SSAX. */
21253 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21254 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21255 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21256 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21257 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21258 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21259 /* Old name for UASX. */
21260 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21261 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21262 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21263 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21264 /* Old name for UHASX. */
21265 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21266 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21267 /* Old name for UHSAX. */
21268 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21269 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21270 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21271 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21272 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21273 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21274 /* Old name for UQASX. */
21275 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21276 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21277 /* Old name for UQSAX. */
21278 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21279 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21280 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21281 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21282 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21283 /* Old name for USAX. */
21284 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21285 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21286 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21287 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21288 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21289 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21290 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21291 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21292 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21293 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21294 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21295 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21296 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21297 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21298 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21299 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21300 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21301 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21302 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21303 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21304 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21305 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21306 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21307 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21308 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21309 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21310 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21311 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21312 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21313 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
21314 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
21315 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21316 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21317 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
21320 #define ARM_VARIANT & arm_ext_v6k_v6t2
21321 #undef THUMB_VARIANT
21322 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21324 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
21325 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
21326 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
21327 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
21329 #undef THUMB_VARIANT
21330 #define THUMB_VARIANT & arm_ext_v6_notm
21331 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
21333 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
21334 RRnpcb
), strexd
, t_strexd
),
21336 #undef THUMB_VARIANT
21337 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21338 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
21340 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
21342 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21344 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21346 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
21349 #define ARM_VARIANT & arm_ext_sec
21350 #undef THUMB_VARIANT
21351 #define THUMB_VARIANT & arm_ext_sec
21353 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
21356 #define ARM_VARIANT & arm_ext_virt
21357 #undef THUMB_VARIANT
21358 #define THUMB_VARIANT & arm_ext_virt
21360 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
21361 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
21364 #define ARM_VARIANT & arm_ext_pan
21365 #undef THUMB_VARIANT
21366 #define THUMB_VARIANT & arm_ext_pan
21368 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
21371 #define ARM_VARIANT & arm_ext_v6t2
21372 #undef THUMB_VARIANT
21373 #define THUMB_VARIANT & arm_ext_v6t2
21375 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
21376 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
21377 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21378 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21380 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21381 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
21383 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21384 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21385 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21386 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21389 #define ARM_VARIANT & arm_ext_v3
21390 #undef THUMB_VARIANT
21391 #define THUMB_VARIANT & arm_ext_v6t2
21393 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
21394 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
21395 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
21398 #define ARM_VARIANT & arm_ext_v6t2
21399 #undef THUMB_VARIANT
21400 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21401 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21402 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21404 /* Thumb-only instructions. */
21406 #define ARM_VARIANT NULL
21407 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
21408 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
21410 /* ARM does not really have an IT instruction, so always allow it.
21411 The opcode is copied from Thumb in order to allow warnings in
21412 -mimplicit-it=[never | arm] modes. */
21414 #define ARM_VARIANT & arm_ext_v1
21415 #undef THUMB_VARIANT
21416 #define THUMB_VARIANT & arm_ext_v6t2
21418 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
21419 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
21420 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
21421 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
21422 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
21423 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
21424 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
21425 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
21426 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
21427 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
21428 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
21429 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
21430 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
21431 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
21432 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
21433 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21434 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21435 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21437 /* Thumb2 only instructions. */
21439 #define ARM_VARIANT NULL
21441 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21442 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21443 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21444 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21445 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
21446 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
21448 /* Hardware division instructions. */
21450 #define ARM_VARIANT & arm_ext_adiv
21451 #undef THUMB_VARIANT
21452 #define THUMB_VARIANT & arm_ext_div
21454 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21455 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21457 /* ARM V6M/V7 instructions. */
21459 #define ARM_VARIANT & arm_ext_barrier
21460 #undef THUMB_VARIANT
21461 #define THUMB_VARIANT & arm_ext_barrier
21463 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
21464 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
21465 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
21467 /* ARM V7 instructions. */
21469 #define ARM_VARIANT & arm_ext_v7
21470 #undef THUMB_VARIANT
21471 #define THUMB_VARIANT & arm_ext_v7
21473 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
21474 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
21477 #define ARM_VARIANT & arm_ext_mp
21478 #undef THUMB_VARIANT
21479 #define THUMB_VARIANT & arm_ext_mp
21481 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
21483 /* ARMv8 instructions. */
21485 #define ARM_VARIANT & arm_ext_v8
21487 /* Instructions shared between armv8-a and armv8-m. */
21488 #undef THUMB_VARIANT
21489 #define THUMB_VARIANT & arm_ext_atomics
21491 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21492 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21493 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21494 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21495 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21496 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21497 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21498 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
21499 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21500 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21502 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21504 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21506 #undef THUMB_VARIANT
21507 #define THUMB_VARIANT & arm_ext_v8
21509 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
21510 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
21512 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
21515 /* Defined in V8 but is in undefined encoding space for earlier
21516 architectures. However earlier architectures are required to treat
21517 this instruction as a semihosting trap as well. Hence while not explicitly
21518 defined as such, it is in fact correct to define the instruction for all
21520 #undef THUMB_VARIANT
21521 #define THUMB_VARIANT & arm_ext_v1
21523 #define ARM_VARIANT & arm_ext_v1
21524 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
21526 /* ARMv8 T32 only. */
21528 #define ARM_VARIANT NULL
21529 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
21530 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
21531 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
21533 /* FP for ARMv8. */
21535 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21536 #undef THUMB_VARIANT
21537 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21539 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21540 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21541 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21542 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21543 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21544 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21545 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
21546 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
21547 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
21548 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
21549 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
21550 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
21551 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
21552 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
21553 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
21554 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
21555 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
21557 /* Crypto v1 extensions. */
21559 #define ARM_VARIANT & fpu_crypto_ext_armv8
21560 #undef THUMB_VARIANT
21561 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21563 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
21564 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
21565 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
21566 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
21567 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
21568 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
21569 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
21570 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
21571 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
21572 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
21573 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
21574 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
21575 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
21576 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
21579 #define ARM_VARIANT & crc_ext_armv8
21580 #undef THUMB_VARIANT
21581 #define THUMB_VARIANT & crc_ext_armv8
21582 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
21583 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
21584 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
21585 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
21586 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
21587 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
21589 /* ARMv8.2 RAS extension. */
21591 #define ARM_VARIANT & arm_ext_ras
21592 #undef THUMB_VARIANT
21593 #define THUMB_VARIANT & arm_ext_ras
21594 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
21597 #define ARM_VARIANT & arm_ext_v8_3
21598 #undef THUMB_VARIANT
21599 #define THUMB_VARIANT & arm_ext_v8_3
21600 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
21601 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
21602 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
21605 #define ARM_VARIANT & fpu_neon_ext_dotprod
21606 #undef THUMB_VARIANT
21607 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21608 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
21609 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
21612 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21613 #undef THUMB_VARIANT
21614 #define THUMB_VARIANT NULL
21616 cCE("wfs", e200110
, 1, (RR
), rd
),
21617 cCE("rfs", e300110
, 1, (RR
), rd
),
21618 cCE("wfc", e400110
, 1, (RR
), rd
),
21619 cCE("rfc", e500110
, 1, (RR
), rd
),
21621 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21622 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21623 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21624 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21626 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21627 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21628 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21629 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21631 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
21632 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
21633 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
21634 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
21635 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
21636 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
21637 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
21638 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
21639 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
21640 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
21641 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
21642 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
21644 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
21645 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
21646 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
21647 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
21648 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
21649 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
21650 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
21651 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
21652 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
21653 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
21654 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
21655 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
21657 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
21658 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
21659 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
21660 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
21661 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
21662 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
21663 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
21664 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
21665 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
21666 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
21667 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
21668 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
21670 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
21671 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
21672 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
21673 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
21674 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
21675 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
21676 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
21677 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
21678 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
21679 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
21680 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
21681 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
21683 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
21684 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
21685 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
21686 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
21687 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
21688 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
21689 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
21690 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
21691 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
21692 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
21693 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
21694 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
21696 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
21697 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
21698 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
21699 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
21700 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
21701 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
21702 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
21703 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
21704 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
21705 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
21706 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
21707 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
21709 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
21710 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
21711 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
21712 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
21713 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
21714 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
21715 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
21716 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
21717 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
21718 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
21719 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
21720 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
21722 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
21723 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
21724 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
21725 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
21726 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
21727 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
21728 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
21729 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
21730 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
21731 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
21732 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
21733 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
21735 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
21736 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
21737 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
21738 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
21739 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
21740 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
21741 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
21742 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
21743 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
21744 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
21745 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
21746 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
21748 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
21749 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
21750 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
21751 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
21752 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
21753 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
21754 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
21755 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
21756 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
21757 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
21758 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
21759 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
21761 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
21762 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
21763 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
21764 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
21765 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
21766 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
21767 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
21768 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
21769 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
21770 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
21771 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
21772 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
21774 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
21775 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
21776 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
21777 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
21778 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
21779 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
21780 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
21781 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
21782 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
21783 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
21784 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
21785 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
21787 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
21788 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
21789 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
21790 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
21791 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
21792 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
21793 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
21794 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
21795 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
21796 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
21797 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
21798 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
21800 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
21801 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
21802 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
21803 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
21804 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
21805 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
21806 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
21807 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
21808 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
21809 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
21810 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
21811 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
21813 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
21814 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
21815 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
21816 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
21817 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
21818 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
21819 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
21820 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
21821 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
21822 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
21823 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
21824 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
21826 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
21827 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
21828 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
21829 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
21830 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
21831 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
21832 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
21833 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
21834 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
21835 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21836 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21837 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21839 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21840 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21841 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21842 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21843 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21844 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21845 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21846 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21847 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21848 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21849 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21850 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21852 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21853 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21854 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21855 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21856 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21857 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21858 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21859 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21860 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21861 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21862 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21863 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21865 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21866 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21867 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21868 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21869 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21870 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21871 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21872 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21873 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21874 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21875 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21876 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21878 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21879 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21880 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21881 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21882 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21883 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21884 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21885 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21886 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21887 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21888 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21889 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21891 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21892 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21893 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21894 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21895 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21896 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21897 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21898 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21899 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21900 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21901 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21902 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21904 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21905 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21906 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21907 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21908 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21909 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21910 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21911 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21912 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21913 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21914 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21915 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21917 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21918 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21919 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21920 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21921 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21922 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21923 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21924 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21925 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21926 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21927 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21928 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21930 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21931 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21932 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21933 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21934 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21935 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21936 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21937 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21938 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21939 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21940 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21941 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21943 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21944 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21945 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21946 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21947 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21948 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21949 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21950 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21951 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21952 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21953 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21954 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21956 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21957 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21958 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21959 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21960 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21961 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21962 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21963 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21964 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21965 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21966 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21967 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21969 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21970 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21971 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21972 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21973 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21974 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21975 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21976 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21977 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21978 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21979 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21980 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21982 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21983 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21984 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21985 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21986 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21987 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21988 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21989 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21990 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21991 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21992 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21993 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21995 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21996 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21997 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21998 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21999 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22000 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22001 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22002 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22003 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22004 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22005 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22006 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22008 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22009 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22010 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22011 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22013 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
22014 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
22015 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
22016 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
22017 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
22018 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
22019 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
22020 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
22021 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
22022 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
22023 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
22024 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
22026 /* The implementation of the FIX instruction is broken on some
22027 assemblers, in that it accepts a precision specifier as well as a
22028 rounding specifier, despite the fact that this is meaningless.
22029 To be more compatible, we accept it as well, though of course it
22030 does not set any bits. */
22031 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
22032 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
22033 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
22034 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
22035 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
22036 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
22037 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
22038 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
22039 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
22040 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
22041 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
22042 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
22043 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
22045 /* Instructions that were new with the real FPA, call them V2. */
22047 #define ARM_VARIANT & fpu_fpa_ext_v2
22049 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22050 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22051 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22052 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22053 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22054 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22057 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22059 /* Moves and type conversions. */
22060 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22061 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
22062 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
22063 cCE("fmstat", ef1fa10
, 0, (), noargs
),
22064 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
22065 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
22066 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22067 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22068 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22069 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22070 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22071 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22072 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
22073 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
22075 /* Memory operations. */
22076 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22077 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22078 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22079 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22080 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22081 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22082 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22083 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22084 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22085 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22086 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22087 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22088 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22089 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22090 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22091 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22092 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22093 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22095 /* Monadic operations. */
22096 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22097 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22098 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22100 /* Dyadic operations. */
22101 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22102 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22103 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22104 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22105 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22106 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22107 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22108 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22109 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22112 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22113 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
22114 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22115 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
22117 /* Double precision load/store are still present on single precision
22118 implementations. */
22119 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22120 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22121 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22122 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22123 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22124 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22125 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22126 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22127 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22128 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22131 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22133 /* Moves and type conversions. */
22134 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22135 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22136 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22137 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22138 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22139 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22140 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22141 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22142 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22143 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22144 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22145 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22146 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22148 /* Monadic operations. */
22149 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22150 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22151 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22153 /* Dyadic operations. */
22154 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22155 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22156 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22157 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22158 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22159 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22160 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22161 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22162 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22165 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22166 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
22167 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22168 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
22171 #define ARM_VARIANT & fpu_vfp_ext_v2
22173 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
22174 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
22175 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
22176 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
22178 /* Instructions which may belong to either the Neon or VFP instruction sets.
22179 Individual encoder functions perform additional architecture checks. */
22181 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22182 #undef THUMB_VARIANT
22183 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22185 /* These mnemonics are unique to VFP. */
22186 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
22187 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
22188 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22189 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22190 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22191 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22192 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22193 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
22194 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
22195 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
22197 /* Mnemonics shared by Neon and VFP. */
22198 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
22199 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22200 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22202 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22203 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22204 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22205 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22206 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22207 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22209 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
22210 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
22211 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
22212 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
22215 /* NOTE: All VMOV encoding is special-cased! */
22216 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
22217 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
22219 #undef THUMB_VARIANT
22220 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22221 by different feature bits. Since we are setting the Thumb guard, we can
22222 require Thumb-1 which makes it a nop guard and set the right feature bit in
22223 do_vldr_vstr (). */
22224 #define THUMB_VARIANT & arm_ext_v4t
22225 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22226 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22229 #define ARM_VARIANT & arm_ext_fp16
22230 #undef THUMB_VARIANT
22231 #define THUMB_VARIANT & arm_ext_fp16
22232 /* New instructions added from v8.2, allowing the extraction and insertion of
22233 the upper 16 bits of a 32-bit vector register. */
22234 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
22235 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
22237 /* New backported fma/fms instructions optional in v8.2. */
22238 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
22239 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
22241 #undef THUMB_VARIANT
22242 #define THUMB_VARIANT & fpu_neon_ext_v1
22244 #define ARM_VARIANT & fpu_neon_ext_v1
22246 /* Data processing with three registers of the same length. */
22247 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22248 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
22249 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
22250 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22251 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22252 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22253 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22254 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22255 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22256 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22257 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22258 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22259 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22260 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22261 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22262 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22263 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22264 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22265 /* If not immediate, fall back to neon_dyadic_i64_su.
22266 shl_imm should accept I8 I16 I32 I64,
22267 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22268 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
22269 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
22270 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
22271 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
22272 /* Logic ops, types optional & ignored. */
22273 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22274 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22275 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22276 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22277 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22278 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22279 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22280 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22281 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
22282 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
22283 /* Bitfield ops, untyped. */
22284 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22285 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22286 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22287 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22288 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22289 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22290 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22291 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22292 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22293 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22294 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22295 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22296 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22297 back to neon_dyadic_if_su. */
22298 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22299 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22300 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22301 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22302 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22303 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22304 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22305 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22306 /* Comparison. Type I8 I16 I32 F32. */
22307 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
22308 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
22309 /* As above, D registers only. */
22310 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22311 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22312 /* Int and float variants, signedness unimportant. */
22313 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22314 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22315 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
22316 /* Add/sub take types I8 I16 I32 I64 F32. */
22317 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22318 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22319 /* vtst takes sizes 8, 16, 32. */
22320 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
22321 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
22322 /* VMUL takes I8 I16 I32 F32 P8. */
22323 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
22324 /* VQD{R}MULH takes S16 S32. */
22325 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22326 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22327 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22328 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22329 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22330 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22331 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22332 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22333 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22334 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22335 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22336 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22337 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22338 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22339 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22340 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22341 /* ARM v8.1 extension. */
22342 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22343 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22344 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22345 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22347 /* Two address, int/float. Types S8 S16 S32 F32. */
22348 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22349 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22351 /* Data processing with two registers and a shift amount. */
22352 /* Right shifts, and variants with rounding.
22353 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22354 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22355 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22356 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22357 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22358 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22359 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22360 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22361 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22362 /* Shift and insert. Sizes accepted 8 16 32 64. */
22363 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
22364 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
22365 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
22366 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
22367 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22368 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
22369 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
22370 /* Right shift immediate, saturating & narrowing, with rounding variants.
22371 Types accepted S16 S32 S64 U16 U32 U64. */
22372 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22373 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22374 /* As above, unsigned. Types accepted S16 S32 S64. */
22375 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22376 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22377 /* Right shift narrowing. Types accepted I16 I32 I64. */
22378 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22379 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22380 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22381 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
22382 /* CVT with optional immediate for fixed-point variant. */
22383 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
22385 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
22386 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
22388 /* Data processing, three registers of different lengths. */
22389 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22390 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
22391 /* If not scalar, fall back to neon_dyadic_long.
22392 Vector types as above, scalar types S16 S32 U16 U32. */
22393 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22394 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22395 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22396 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22397 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22398 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22399 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22400 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22401 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22402 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22403 /* Saturating doubling multiplies. Types S16 S32. */
22404 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22405 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22406 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22407 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22408 S16 S32 U16 U32. */
22409 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
22411 /* Extract. Size 8. */
22412 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
22413 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
22415 /* Two registers, miscellaneous. */
22416 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22417 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
22418 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
22419 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
22420 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
22421 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
22422 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
22423 /* Vector replicate. Sizes 8 16 32. */
22424 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
22425 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
22426 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22427 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
22428 /* VMOVN. Types I16 I32 I64. */
22429 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
22430 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22431 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
22432 /* VQMOVUN. Types S16 S32 S64. */
22433 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
22434 /* VZIP / VUZP. Sizes 8 16 32. */
22435 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22436 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22437 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22438 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22439 /* VQABS / VQNEG. Types S8 S16 S32. */
22440 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22441 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22442 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22443 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22444 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22445 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22446 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
22447 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22448 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
22449 /* Reciprocal estimates. Types U32 F16 F32. */
22450 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22451 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
22452 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22453 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
22454 /* VCLS. Types S8 S16 S32. */
22455 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
22456 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
22457 /* VCLZ. Types I8 I16 I32. */
22458 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
22459 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
22460 /* VCNT. Size 8. */
22461 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
22462 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
22463 /* Two address, untyped. */
22464 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
22465 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
22466 /* VTRN. Sizes 8 16 32. */
22467 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
22468 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
22470 /* Table lookup. Size 8. */
22471 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22472 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22474 #undef THUMB_VARIANT
22475 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22477 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22479 /* Neon element/structure load/store. */
22480 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22481 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22482 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22483 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22484 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22485 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22486 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22487 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22489 #undef THUMB_VARIANT
22490 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22492 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22493 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
22494 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22495 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22496 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22497 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22498 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22499 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22500 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22501 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22503 #undef THUMB_VARIANT
22504 #define THUMB_VARIANT & fpu_vfp_ext_v3
22506 #define ARM_VARIANT & fpu_vfp_ext_v3
22508 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
22509 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22510 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22511 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22512 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22513 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22514 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22515 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22516 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22519 #define ARM_VARIANT & fpu_vfp_ext_fma
22520 #undef THUMB_VARIANT
22521 #define THUMB_VARIANT & fpu_vfp_ext_fma
22522 /* Mnemonics shared by Neon and VFP. These are included in the
22523 VFP FMA variant; NEON and VFP FMA always includes the NEON
22524 FMA instructions. */
22525 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22526 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22527 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22528 the v form should always be used. */
22529 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22530 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22531 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22532 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22533 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22534 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22536 #undef THUMB_VARIANT
22538 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22540 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22541 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22542 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22543 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22544 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22545 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22546 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
22547 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
22550 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22552 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
22553 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
22554 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
22555 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
22556 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
22557 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
22558 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
22559 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
22560 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
22561 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22562 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22563 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22564 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22565 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22566 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22567 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22568 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22569 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22570 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
22571 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
22572 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22573 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22574 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22575 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22576 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22577 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22578 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
22579 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
22580 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
22581 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
22582 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
22583 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
22584 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
22585 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
22586 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22587 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22588 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22589 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22590 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22591 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22592 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22593 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22594 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22595 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22596 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22597 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22598 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
22599 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22600 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22601 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22602 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22603 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22604 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22605 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22606 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22607 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22608 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22609 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22610 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22611 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22612 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22613 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22614 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22615 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22616 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22617 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22618 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22619 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22620 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22621 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22622 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22623 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22624 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22625 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22626 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22627 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22628 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22629 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22630 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22631 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22632 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22633 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22634 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22635 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22636 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22637 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22638 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22639 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22640 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
22641 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22642 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22643 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22644 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22645 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22646 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22647 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22648 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22649 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22650 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22651 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22652 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22653 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22654 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22655 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22656 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22657 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22658 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22659 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22660 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22661 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22662 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
22663 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22664 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22665 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22666 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22667 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22668 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22669 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22670 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22671 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22672 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22673 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22674 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22675 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22676 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22677 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22678 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22679 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22680 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22681 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22682 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22683 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22684 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22685 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22686 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22687 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22688 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22689 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22690 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22691 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22692 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22693 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22694 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22695 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22696 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22697 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22698 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22699 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22700 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22701 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22702 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22703 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22704 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22705 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22706 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22707 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22708 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22709 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22710 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22711 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22712 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22713 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
22716 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
22718 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
22719 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
22720 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
22721 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22722 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22723 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22724 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22725 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22726 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22727 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22728 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22729 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22730 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22731 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22732 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22733 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22734 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22735 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22736 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22737 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22738 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
22739 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22740 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22741 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22742 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22743 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22744 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22745 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22746 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22747 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22748 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22749 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22750 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22751 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22752 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22753 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22754 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22755 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22756 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22757 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22758 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22759 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22760 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22761 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22762 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22763 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22764 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22765 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22766 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22767 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22768 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22769 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22770 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22771 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22772 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22773 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22774 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22777 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
22779 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22780 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22781 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22782 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22783 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22784 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22785 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22786 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22787 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
22788 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
22789 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
22790 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
22791 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
22792 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
22793 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
22794 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
22795 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
22796 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
22797 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
22798 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
22799 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
22800 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
22801 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
22802 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
22803 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
22804 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
22805 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
22806 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
22807 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
22808 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
22809 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
22810 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
22811 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
22812 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
22813 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
22814 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
22815 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
22816 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
22817 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
22818 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
22819 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
22820 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
22821 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
22822 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
22823 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
22824 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
22825 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22826 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22827 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22828 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22829 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22830 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22831 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22832 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22833 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22834 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22835 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22836 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22837 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22838 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22839 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22840 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22841 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22842 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22843 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22844 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22845 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22846 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22847 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22848 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22849 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22850 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22851 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22852 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22853 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22854 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22856 /* ARMv8.5-A instructions. */
22858 #define ARM_VARIANT & arm_ext_sb
22859 #undef THUMB_VARIANT
22860 #define THUMB_VARIANT & arm_ext_sb
22861 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22864 #define ARM_VARIANT & arm_ext_predres
22865 #undef THUMB_VARIANT
22866 #define THUMB_VARIANT & arm_ext_predres
22867 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22868 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22869 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22871 /* ARMv8-M instructions. */
22873 #define ARM_VARIANT NULL
22874 #undef THUMB_VARIANT
22875 #define THUMB_VARIANT & arm_ext_v8m
22876 ToU("sg", e97fe97f
, 0, (), noargs
),
22877 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22878 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22879 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22880 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22881 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22882 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22884 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22885 instructions behave as nop if no VFP is present. */
22886 #undef THUMB_VARIANT
22887 #define THUMB_VARIANT & arm_ext_v8m_main
22888 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22889 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22891 /* Armv8.1-M Mainline instructions. */
22892 #undef THUMB_VARIANT
22893 #define THUMB_VARIANT & arm_ext_v8_1m_main
22894 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22895 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22896 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22897 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22898 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22900 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22901 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22902 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22904 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22905 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
22907 #undef THUMB_VARIANT
22908 #define THUMB_VARIANT & mve_ext
22909 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
22910 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
22911 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
22912 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
22913 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
22914 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
22915 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
22916 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
22917 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
22918 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
22919 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
22920 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
22921 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
22922 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
22923 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
22925 /* MVE and MVE FP only. */
22926 mCEF(vmullb
, _vmullb
, 3, (RMQ
, RMQ
, RMQ
), mve_vmull
),
22927 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
22928 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22929 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22930 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22931 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22932 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22933 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22934 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22935 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22936 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22937 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22939 mCEF(vst20
, _vst20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
22940 mCEF(vst21
, _vst21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
22941 mCEF(vst40
, _vst40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22942 mCEF(vst41
, _vst41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22943 mCEF(vst42
, _vst42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22944 mCEF(vst43
, _vst43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22945 mCEF(vld20
, _vld20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
22946 mCEF(vld21
, _vld21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
22947 mCEF(vld40
, _vld40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22948 mCEF(vld41
, _vld41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22949 mCEF(vld42
, _vld42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22950 mCEF(vld43
, _vld43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
22953 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22954 #undef THUMB_VARIANT
22955 #define THUMB_VARIANT & arm_ext_v6t2
22957 mCEF(vmullt
, _vmullt
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ
), mve_vmull
),
22958 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22959 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22961 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22962 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22965 #define ARM_VARIANT & fpu_neon_ext_v1
22966 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
22967 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
22968 mnUF(vaddl
, _vaddl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22969 mnUF(vsubl
, _vsubl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22972 #undef THUMB_VARIANT
23004 /* MD interface: bits in the object file. */
23006 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23007 for use in the a.out file, and stores them in the array pointed to by buf.
23008 This knows about the endian-ness of the target machine and does
23009 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23010 2 (short) and 4 (long) Floating numbers are put out as a series of
23011 LITTLENUMS (shorts, here at least). */
23014 md_number_to_chars (char * buf
, valueT val
, int n
)
23016 if (target_big_endian
)
23017 number_to_chars_bigendian (buf
, val
, n
);
23019 number_to_chars_littleendian (buf
, val
, n
);
23023 md_chars_to_number (char * buf
, int n
)
23026 unsigned char * where
= (unsigned char *) buf
;
23028 if (target_big_endian
)
23033 result
|= (*where
++ & 255);
23041 result
|= (where
[n
] & 255);
23048 /* MD interface: Sections. */
23050 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23051 that an rs_machine_dependent frag may reach. */
23054 arm_frag_max_var (fragS
*fragp
)
23056 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23057 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23059 Note that we generate relaxable instructions even for cases that don't
23060 really need it, like an immediate that's a trivial constant. So we're
23061 overestimating the instruction size for some of those cases. Rather
23062 than putting more intelligence here, it would probably be better to
23063 avoid generating a relaxation frag in the first place when it can be
23064 determined up front that a short instruction will suffice. */
23066 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
23070 /* Estimate the size of a frag before relaxing. Assume everything fits in
23074 md_estimate_size_before_relax (fragS
* fragp
,
23075 segT segtype ATTRIBUTE_UNUSED
)
23081 /* Convert a machine dependent frag. */
23084 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
23086 unsigned long insn
;
23087 unsigned long old_op
;
23095 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23097 old_op
= bfd_get_16(abfd
, buf
);
23098 if (fragp
->fr_symbol
)
23100 exp
.X_op
= O_symbol
;
23101 exp
.X_add_symbol
= fragp
->fr_symbol
;
23105 exp
.X_op
= O_constant
;
23107 exp
.X_add_number
= fragp
->fr_offset
;
23108 opcode
= fragp
->fr_subtype
;
23111 case T_MNEM_ldr_pc
:
23112 case T_MNEM_ldr_pc2
:
23113 case T_MNEM_ldr_sp
:
23114 case T_MNEM_str_sp
:
23121 if (fragp
->fr_var
== 4)
23123 insn
= THUMB_OP32 (opcode
);
23124 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
23126 insn
|= (old_op
& 0x700) << 4;
23130 insn
|= (old_op
& 7) << 12;
23131 insn
|= (old_op
& 0x38) << 13;
23133 insn
|= 0x00000c00;
23134 put_thumb32_insn (buf
, insn
);
23135 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
23139 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
23141 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
23144 if (fragp
->fr_var
== 4)
23146 insn
= THUMB_OP32 (opcode
);
23147 insn
|= (old_op
& 0xf0) << 4;
23148 put_thumb32_insn (buf
, insn
);
23149 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
23153 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23154 exp
.X_add_number
-= 4;
23162 if (fragp
->fr_var
== 4)
23164 int r0off
= (opcode
== T_MNEM_mov
23165 || opcode
== T_MNEM_movs
) ? 0 : 8;
23166 insn
= THUMB_OP32 (opcode
);
23167 insn
= (insn
& 0xe1ffffff) | 0x10000000;
23168 insn
|= (old_op
& 0x700) << r0off
;
23169 put_thumb32_insn (buf
, insn
);
23170 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23174 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
23179 if (fragp
->fr_var
== 4)
23181 insn
= THUMB_OP32(opcode
);
23182 put_thumb32_insn (buf
, insn
);
23183 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
23186 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
23190 if (fragp
->fr_var
== 4)
23192 insn
= THUMB_OP32(opcode
);
23193 insn
|= (old_op
& 0xf00) << 14;
23194 put_thumb32_insn (buf
, insn
);
23195 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
23198 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
23201 case T_MNEM_add_sp
:
23202 case T_MNEM_add_pc
:
23203 case T_MNEM_inc_sp
:
23204 case T_MNEM_dec_sp
:
23205 if (fragp
->fr_var
== 4)
23207 /* ??? Choose between add and addw. */
23208 insn
= THUMB_OP32 (opcode
);
23209 insn
|= (old_op
& 0xf0) << 4;
23210 put_thumb32_insn (buf
, insn
);
23211 if (opcode
== T_MNEM_add_pc
)
23212 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
23214 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23217 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23225 if (fragp
->fr_var
== 4)
23227 insn
= THUMB_OP32 (opcode
);
23228 insn
|= (old_op
& 0xf0) << 4;
23229 insn
|= (old_op
& 0xf) << 16;
23230 put_thumb32_insn (buf
, insn
);
23231 if (insn
& (1 << 20))
23232 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23234 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23237 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23243 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
23244 (enum bfd_reloc_code_real
) reloc_type
);
23245 fixp
->fx_file
= fragp
->fr_file
;
23246 fixp
->fx_line
= fragp
->fr_line
;
23247 fragp
->fr_fix
+= fragp
->fr_var
;
23249 /* Set whether we use thumb-2 ISA based on final relaxation results. */
23250 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
23251 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
23252 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
23255 /* Return the size of a relaxable immediate operand instruction.
23256 SHIFT and SIZE specify the form of the allowable immediate. */
23258 relax_immediate (fragS
*fragp
, int size
, int shift
)
23264 /* ??? Should be able to do better than this. */
23265 if (fragp
->fr_symbol
)
23268 low
= (1 << shift
) - 1;
23269 mask
= (1 << (shift
+ size
)) - (1 << shift
);
23270 offset
= fragp
->fr_offset
;
23271 /* Force misaligned offsets to 32-bit variant. */
23274 if (offset
& ~mask
)
23279 /* Get the address of a symbol during relaxation. */
23281 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
23287 sym
= fragp
->fr_symbol
;
23288 sym_frag
= symbol_get_frag (sym
);
23289 know (S_GET_SEGMENT (sym
) != absolute_section
23290 || sym_frag
== &zero_address_frag
);
23291 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
23293 /* If frag has yet to be reached on this pass, assume it will
23294 move by STRETCH just as we did. If this is not so, it will
23295 be because some frag between grows, and that will force
23299 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
23303 /* Adjust stretch for any alignment frag. Note that if have
23304 been expanding the earlier code, the symbol may be
23305 defined in what appears to be an earlier frag. FIXME:
23306 This doesn't handle the fr_subtype field, which specifies
23307 a maximum number of bytes to skip when doing an
23309 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
23311 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
23314 stretch
= - ((- stretch
)
23315 & ~ ((1 << (int) f
->fr_offset
) - 1));
23317 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
23329 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23332 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
23337 /* Assume worst case for symbols not known to be in the same section. */
23338 if (fragp
->fr_symbol
== NULL
23339 || !S_IS_DEFINED (fragp
->fr_symbol
)
23340 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23341 || S_IS_WEAK (fragp
->fr_symbol
))
23344 val
= relaxed_symbol_addr (fragp
, stretch
);
23345 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
23346 addr
= (addr
+ 4) & ~3;
23347 /* Force misaligned targets to 32-bit variant. */
23351 if (val
< 0 || val
> 1020)
23356 /* Return the size of a relaxable add/sub immediate instruction. */
23358 relax_addsub (fragS
*fragp
, asection
*sec
)
23363 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23364 op
= bfd_get_16(sec
->owner
, buf
);
23365 if ((op
& 0xf) == ((op
>> 4) & 0xf))
23366 return relax_immediate (fragp
, 8, 0);
23368 return relax_immediate (fragp
, 3, 0);
23371 /* Return TRUE iff the definition of symbol S could be pre-empted
23372 (overridden) at link or load time. */
23374 symbol_preemptible (symbolS
*s
)
23376 /* Weak symbols can always be pre-empted. */
23380 /* Non-global symbols cannot be pre-empted. */
23381 if (! S_IS_EXTERNAL (s
))
23385 /* In ELF, a global symbol can be marked protected, or private. In that
23386 case it can't be pre-empted (other definitions in the same link unit
23387 would violate the ODR). */
23388 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
23392 /* Other global symbols might be pre-empted. */
23396 /* Return the size of a relaxable branch instruction. BITS is the
23397 size of the offset field in the narrow instruction. */
23400 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
23406 /* Assume worst case for symbols not known to be in the same section. */
23407 if (!S_IS_DEFINED (fragp
->fr_symbol
)
23408 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23409 || S_IS_WEAK (fragp
->fr_symbol
))
23413 /* A branch to a function in ARM state will require interworking. */
23414 if (S_IS_DEFINED (fragp
->fr_symbol
)
23415 && ARM_IS_FUNC (fragp
->fr_symbol
))
23419 if (symbol_preemptible (fragp
->fr_symbol
))
23422 val
= relaxed_symbol_addr (fragp
, stretch
);
23423 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
23426 /* Offset is a signed value *2 */
23428 if (val
>= limit
|| val
< -limit
)
23434 /* Relax a machine dependent frag. This returns the amount by which
23435 the current size of the frag should change. */
23438 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
23443 oldsize
= fragp
->fr_var
;
23444 switch (fragp
->fr_subtype
)
23446 case T_MNEM_ldr_pc2
:
23447 newsize
= relax_adr (fragp
, sec
, stretch
);
23449 case T_MNEM_ldr_pc
:
23450 case T_MNEM_ldr_sp
:
23451 case T_MNEM_str_sp
:
23452 newsize
= relax_immediate (fragp
, 8, 2);
23456 newsize
= relax_immediate (fragp
, 5, 2);
23460 newsize
= relax_immediate (fragp
, 5, 1);
23464 newsize
= relax_immediate (fragp
, 5, 0);
23467 newsize
= relax_adr (fragp
, sec
, stretch
);
23473 newsize
= relax_immediate (fragp
, 8, 0);
23476 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
23479 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
23481 case T_MNEM_add_sp
:
23482 case T_MNEM_add_pc
:
23483 newsize
= relax_immediate (fragp
, 8, 2);
23485 case T_MNEM_inc_sp
:
23486 case T_MNEM_dec_sp
:
23487 newsize
= relax_immediate (fragp
, 7, 2);
23493 newsize
= relax_addsub (fragp
, sec
);
23499 fragp
->fr_var
= newsize
;
23500 /* Freeze wide instructions that are at or before the same location as
23501 in the previous pass. This avoids infinite loops.
23502 Don't freeze them unconditionally because targets may be artificially
23503 misaligned by the expansion of preceding frags. */
23504 if (stretch
<= 0 && newsize
> 2)
23506 md_convert_frag (sec
->owner
, sec
, fragp
);
23510 return newsize
- oldsize
;
23513 /* Round up a section size to the appropriate boundary. */
23516 md_section_align (segT segment ATTRIBUTE_UNUSED
,
23522 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23523 of an rs_align_code fragment. */
23526 arm_handle_align (fragS
* fragP
)
23528 static unsigned char const arm_noop
[2][2][4] =
23531 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
23532 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
23535 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
23536 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
23539 static unsigned char const thumb_noop
[2][2][2] =
23542 {0xc0, 0x46}, /* LE */
23543 {0x46, 0xc0}, /* BE */
23546 {0x00, 0xbf}, /* LE */
23547 {0xbf, 0x00} /* BE */
23550 static unsigned char const wide_thumb_noop
[2][4] =
23551 { /* Wide Thumb-2 */
23552 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
23553 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
23556 unsigned bytes
, fix
, noop_size
;
23558 const unsigned char * noop
;
23559 const unsigned char *narrow_noop
= NULL
;
23564 if (fragP
->fr_type
!= rs_align_code
)
23567 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
23568 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
23571 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23572 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
23574 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
23576 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
23578 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23579 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
23581 narrow_noop
= thumb_noop
[1][target_big_endian
];
23582 noop
= wide_thumb_noop
[target_big_endian
];
23585 noop
= thumb_noop
[0][target_big_endian
];
23593 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23594 ? selected_cpu
: arm_arch_none
,
23596 [target_big_endian
];
23603 fragP
->fr_var
= noop_size
;
23605 if (bytes
& (noop_size
- 1))
23607 fix
= bytes
& (noop_size
- 1);
23609 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
23611 memset (p
, 0, fix
);
23618 if (bytes
& noop_size
)
23620 /* Insert a narrow noop. */
23621 memcpy (p
, narrow_noop
, noop_size
);
23623 bytes
-= noop_size
;
23627 /* Use wide noops for the remainder */
23631 while (bytes
>= noop_size
)
23633 memcpy (p
, noop
, noop_size
);
23635 bytes
-= noop_size
;
23639 fragP
->fr_fix
+= fix
;
23642 /* Called from md_do_align. Used to create an alignment
23643 frag in a code section. */
23646 arm_frag_align_code (int n
, int max
)
23650 /* We assume that there will never be a requirement
23651 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23652 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23657 _("alignments greater than %d bytes not supported in .text sections."),
23658 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
23659 as_fatal ("%s", err_msg
);
23662 p
= frag_var (rs_align_code
,
23663 MAX_MEM_FOR_RS_ALIGN_CODE
,
23665 (relax_substateT
) max
,
23672 /* Perform target specific initialisation of a frag.
23673 Note - despite the name this initialisation is not done when the frag
23674 is created, but only when its type is assigned. A frag can be created
23675 and used a long time before its type is set, so beware of assuming that
23676 this initialisation is performed first. */
23680 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
23682 /* Record whether this frag is in an ARM or a THUMB area. */
23683 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23686 #else /* OBJ_ELF is defined. */
23688 arm_init_frag (fragS
* fragP
, int max_chars
)
23690 bfd_boolean frag_thumb_mode
;
23692 /* If the current ARM vs THUMB mode has not already
23693 been recorded into this frag then do so now. */
23694 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
23695 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23697 /* PR 21809: Do not set a mapping state for debug sections
23698 - it just confuses other tools. */
23699 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
23702 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
23704 /* Record a mapping symbol for alignment frags. We will delete this
23705 later if the alignment ends up empty. */
23706 switch (fragP
->fr_type
)
23709 case rs_align_test
:
23711 mapping_state_2 (MAP_DATA
, max_chars
);
23713 case rs_align_code
:
23714 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
23721 /* When we change sections we need to issue a new mapping symbol. */
23724 arm_elf_change_section (void)
23726 /* Link an unlinked unwind index table section to the .text section. */
23727 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
23728 && elf_linked_to_section (now_seg
) == NULL
)
23729 elf_linked_to_section (now_seg
) = text_section
;
23733 arm_elf_section_type (const char * str
, size_t len
)
23735 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
23736 return SHT_ARM_EXIDX
;
23741 /* Code to deal with unwinding tables. */
23743 static void add_unwind_adjustsp (offsetT
);
23745 /* Generate any deferred unwind frame offset. */
23748 flush_pending_unwind (void)
23752 offset
= unwind
.pending_offset
;
23753 unwind
.pending_offset
= 0;
23755 add_unwind_adjustsp (offset
);
23758 /* Add an opcode to this list for this function. Two-byte opcodes should
23759 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
23763 add_unwind_opcode (valueT op
, int length
)
23765 /* Add any deferred stack adjustment. */
23766 if (unwind
.pending_offset
)
23767 flush_pending_unwind ();
23769 unwind
.sp_restored
= 0;
23771 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
23773 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
23774 if (unwind
.opcodes
)
23775 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
23776 unwind
.opcode_alloc
);
23778 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
23783 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
23785 unwind
.opcode_count
++;
23789 /* Add unwind opcodes to adjust the stack pointer. */
23792 add_unwind_adjustsp (offsetT offset
)
23796 if (offset
> 0x200)
23798 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
23803 /* Long form: 0xb2, uleb128. */
23804 /* This might not fit in a word so add the individual bytes,
23805 remembering the list is built in reverse order. */
23806 o
= (valueT
) ((offset
- 0x204) >> 2);
23808 add_unwind_opcode (0, 1);
23810 /* Calculate the uleb128 encoding of the offset. */
23814 bytes
[n
] = o
& 0x7f;
23820 /* Add the insn. */
23822 add_unwind_opcode (bytes
[n
- 1], 1);
23823 add_unwind_opcode (0xb2, 1);
23825 else if (offset
> 0x100)
23827 /* Two short opcodes. */
23828 add_unwind_opcode (0x3f, 1);
23829 op
= (offset
- 0x104) >> 2;
23830 add_unwind_opcode (op
, 1);
23832 else if (offset
> 0)
23834 /* Short opcode. */
23835 op
= (offset
- 4) >> 2;
23836 add_unwind_opcode (op
, 1);
23838 else if (offset
< 0)
23841 while (offset
> 0x100)
23843 add_unwind_opcode (0x7f, 1);
23846 op
= ((offset
- 4) >> 2) | 0x40;
23847 add_unwind_opcode (op
, 1);
23851 /* Finish the list of unwind opcodes for this function. */
23854 finish_unwind_opcodes (void)
23858 if (unwind
.fp_used
)
23860 /* Adjust sp as necessary. */
23861 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
23862 flush_pending_unwind ();
23864 /* After restoring sp from the frame pointer. */
23865 op
= 0x90 | unwind
.fp_reg
;
23866 add_unwind_opcode (op
, 1);
23869 flush_pending_unwind ();
23873 /* Start an exception table entry. If idx is nonzero this is an index table
23877 start_unwind_section (const segT text_seg
, int idx
)
23879 const char * text_name
;
23880 const char * prefix
;
23881 const char * prefix_once
;
23882 const char * group_name
;
23890 prefix
= ELF_STRING_ARM_unwind
;
23891 prefix_once
= ELF_STRING_ARM_unwind_once
;
23892 type
= SHT_ARM_EXIDX
;
23896 prefix
= ELF_STRING_ARM_unwind_info
;
23897 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23898 type
= SHT_PROGBITS
;
23901 text_name
= segment_name (text_seg
);
23902 if (streq (text_name
, ".text"))
23905 if (strncmp (text_name
, ".gnu.linkonce.t.",
23906 strlen (".gnu.linkonce.t.")) == 0)
23908 prefix
= prefix_once
;
23909 text_name
+= strlen (".gnu.linkonce.t.");
23912 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23918 /* Handle COMDAT group. */
23919 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23921 group_name
= elf_group_name (text_seg
);
23922 if (group_name
== NULL
)
23924 as_bad (_("Group section `%s' has no group signature"),
23925 segment_name (text_seg
));
23926 ignore_rest_of_line ();
23929 flags
|= SHF_GROUP
;
23933 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23936 /* Set the section link for index tables. */
23938 elf_linked_to_section (now_seg
) = text_seg
;
23942 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23943 personality routine data. Returns zero, or the index table value for
23944 an inline entry. */
23947 create_unwind_entry (int have_data
)
23952 /* The current word of data. */
23954 /* The number of bytes left in this word. */
23957 finish_unwind_opcodes ();
23959 /* Remember the current text section. */
23960 unwind
.saved_seg
= now_seg
;
23961 unwind
.saved_subseg
= now_subseg
;
23963 start_unwind_section (now_seg
, 0);
23965 if (unwind
.personality_routine
== NULL
)
23967 if (unwind
.personality_index
== -2)
23970 as_bad (_("handlerdata in cantunwind frame"));
23971 return 1; /* EXIDX_CANTUNWIND. */
23974 /* Use a default personality routine if none is specified. */
23975 if (unwind
.personality_index
== -1)
23977 if (unwind
.opcode_count
> 3)
23978 unwind
.personality_index
= 1;
23980 unwind
.personality_index
= 0;
23983 /* Space for the personality routine entry. */
23984 if (unwind
.personality_index
== 0)
23986 if (unwind
.opcode_count
> 3)
23987 as_bad (_("too many unwind opcodes for personality routine 0"));
23991 /* All the data is inline in the index table. */
23994 while (unwind
.opcode_count
> 0)
23996 unwind
.opcode_count
--;
23997 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24001 /* Pad with "finish" opcodes. */
24003 data
= (data
<< 8) | 0xb0;
24010 /* We get two opcodes "free" in the first word. */
24011 size
= unwind
.opcode_count
- 2;
24015 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
24016 if (unwind
.personality_index
!= -1)
24018 as_bad (_("attempt to recreate an unwind entry"));
24022 /* An extra byte is required for the opcode count. */
24023 size
= unwind
.opcode_count
+ 1;
24026 size
= (size
+ 3) >> 2;
24028 as_bad (_("too many unwind opcodes"));
24030 frag_align (2, 0, 0);
24031 record_alignment (now_seg
, 2);
24032 unwind
.table_entry
= expr_build_dot ();
24034 /* Allocate the table entry. */
24035 ptr
= frag_more ((size
<< 2) + 4);
24036 /* PR 13449: Zero the table entries in case some of them are not used. */
24037 memset (ptr
, 0, (size
<< 2) + 4);
24038 where
= frag_now_fix () - ((size
<< 2) + 4);
24040 switch (unwind
.personality_index
)
24043 /* ??? Should this be a PLT generating relocation? */
24044 /* Custom personality routine. */
24045 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
24046 BFD_RELOC_ARM_PREL31
);
24051 /* Set the first byte to the number of additional words. */
24052 data
= size
> 0 ? size
- 1 : 0;
24056 /* ABI defined personality routines. */
24058 /* Three opcodes bytes are packed into the first word. */
24065 /* The size and first two opcode bytes go in the first word. */
24066 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
24071 /* Should never happen. */
24075 /* Pack the opcodes into words (MSB first), reversing the list at the same
24077 while (unwind
.opcode_count
> 0)
24081 md_number_to_chars (ptr
, data
, 4);
24086 unwind
.opcode_count
--;
24088 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24091 /* Finish off the last word. */
24094 /* Pad with "finish" opcodes. */
24096 data
= (data
<< 8) | 0xb0;
24098 md_number_to_chars (ptr
, data
, 4);
24103 /* Add an empty descriptor if there is no user-specified data. */
24104 ptr
= frag_more (4);
24105 md_number_to_chars (ptr
, 0, 4);
24112 /* Initialize the DWARF-2 unwind information for this procedure. */
24115 tc_arm_frame_initial_instructions (void)
24117 cfi_add_CFA_def_cfa (REG_SP
, 0);
24119 #endif /* OBJ_ELF */
24121 /* Convert REGNAME to a DWARF-2 register number. */
24124 tc_arm_regname_to_dw2regnum (char *regname
)
24126 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
24130 /* PR 16694: Allow VFP registers as well. */
24131 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
24135 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
#ifdef TE_PE
/* Emit a section-relative (secrel) offset for SYMBOL of SIZE bytes,
   as used by PE DWARF-2 debug info.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
24155 /* MD interface: Symbol and relocation handling. */
24157 /* Return the address within the segment that a PC-relative fixup is
24158 relative to. For ARM, PC-relative fixups applied to instructions
24159 are generally relative to the location of the fixup plus 8 bytes.
24160 Thumb branches are offset by 4, and Thumb loads relative to PC
24161 require special handling. */
24164 md_pcrel_from_section (fixS
* fixP
, segT seg
)
24166 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24168 /* If this is pc-relative and we are going to emit a relocation
24169 then we just want to put out any pipeline compensation that the linker
24170 will need. Otherwise we want to use the calculated base.
24171 For WinCE we skip the bias for externals as well, since this
24172 is how the MS ARM-CE assembler behaves and we want to be compatible. */
24174 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24175 || (arm_force_relocation (fixP
)
24177 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
24183 switch (fixP
->fx_r_type
)
24185 /* PC relative addressing on the Thumb is slightly odd as the
24186 bottom two bits of the PC are forced to zero for the
24187 calculation. This happens *after* application of the
24188 pipeline offset. However, Thumb adrl already adjusts for
24189 this, so we need not do it again. */
24190 case BFD_RELOC_ARM_THUMB_ADD
:
24193 case BFD_RELOC_ARM_THUMB_OFFSET
:
24194 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24195 case BFD_RELOC_ARM_T32_ADD_PC12
:
24196 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24197 return (base
+ 4) & ~3;
24199 /* Thumb branches are simply offset by +4. */
24200 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
24201 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24202 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24203 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24204 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24205 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24206 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
24207 case BFD_RELOC_ARM_THUMB_BF17
:
24208 case BFD_RELOC_ARM_THUMB_BF19
:
24209 case BFD_RELOC_ARM_THUMB_BF13
:
24210 case BFD_RELOC_ARM_THUMB_LOOP12
:
24213 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24215 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24216 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24217 && ARM_IS_FUNC (fixP
->fx_addsy
)
24218 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24219 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24222 /* BLX is like branches above, but forces the low two bits of PC to
24224 case BFD_RELOC_THUMB_PCREL_BLX
:
24226 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24227 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24228 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24229 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24230 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24231 return (base
+ 4) & ~3;
24233 /* ARM mode branches are offset by +8. However, the Windows CE
24234 loader expects the relocation not to take this into account. */
24235 case BFD_RELOC_ARM_PCREL_BLX
:
24237 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24238 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24239 && ARM_IS_FUNC (fixP
->fx_addsy
)
24240 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24241 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24244 case BFD_RELOC_ARM_PCREL_CALL
:
24246 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24247 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24248 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24249 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24250 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24253 case BFD_RELOC_ARM_PCREL_BRANCH
:
24254 case BFD_RELOC_ARM_PCREL_JUMP
:
24255 case BFD_RELOC_ARM_PLT32
:
24257 /* When handling fixups immediately, because we have already
24258 discovered the value of a symbol, or the address of the frag involved
24259 we must account for the offset by +8, as the OS loader will never see the reloc.
24260 see fixup_segment() in write.c
24261 The S_IS_EXTERNAL test handles the case of global symbols.
24262 Those need the calculated base, not just the pipe compensation the linker will need. */
24264 && fixP
->fx_addsy
!= NULL
24265 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24266 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
24274 /* ARM mode loads relative to PC are also offset by +8. Unlike
24275 branches, the Windows CE loader *does* expect the relocation
24276 to take this into account. */
24277 case BFD_RELOC_ARM_OFFSET_IMM
:
24278 case BFD_RELOC_ARM_OFFSET_IMM8
:
24279 case BFD_RELOC_ARM_HWLITERAL
:
24280 case BFD_RELOC_ARM_LITERAL
:
24281 case BFD_RELOC_ARM_CP_OFF_IMM
:
24285 /* Other PC-relative relocations are un-offset. */
24291 static bfd_boolean flag_warn_syms
= TRUE
;
24294 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
24296 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24297 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24298 does mean that the resulting code might be very confusing to the reader.
24299 Also this warning can be triggered if the user omits an operand before
24300 an immediate address, eg:
24304 GAS treats this as an assignment of the value of the symbol foo to a
24305 symbol LDR, and so (without this code) it will not issue any kind of
24306 warning or error message.
24308 Note - ARM instructions are case-insensitive but the strings in the hash
24309 table are all stored in lower case, so we must first ensure that name is
24311 if (flag_warn_syms
&& arm_ops_hsh
)
24313 char * nbuf
= strdup (name
);
24316 for (p
= nbuf
; *p
; p
++)
24318 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
24320 static struct hash_control
* already_warned
= NULL
;
24322 if (already_warned
== NULL
)
24323 already_warned
= hash_new ();
24324 /* Only warn about the symbol once. To keep the code
24325 simple we let hash_insert do the lookup for us. */
24326 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
24327 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
24336 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24337 Otherwise we have no need to default values of symbols. */
24340 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
24343 if (name
[0] == '_' && name
[1] == 'G'
24344 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
24348 if (symbol_find (name
))
24349 as_bad (_("GOT already in the symbol table"));
24351 GOT_symbol
= symbol_new (name
, undefined_section
,
24352 (valueT
) 0, & zero_address_frag
);
24362 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24363 computed as two separate immediate values, added together. We
24364 already know that this value cannot be computed by just one ARM
24367 static unsigned int
24368 validate_immediate_twopart (unsigned int val
,
24369 unsigned int * highpart
)
24374 for (i
= 0; i
< 32; i
+= 2)
24375 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
24381 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
24383 else if (a
& 0xff0000)
24385 if (a
& 0xff000000)
24387 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
24391 gas_assert (a
& 0xff000000);
24392 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
24395 return (a
& 0xff) | (i
<< 7);
24402 validate_offset_imm (unsigned int val
, int hwse
)
24404 if ((hwse
&& val
> 255) || val
> 4095)
24409 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24410 negative immediate constant by altering the instruction. A bit of
24415 by inverting the second operand, and
24418 by negating the second operand. */
24421 negate_data_op (unsigned long * instruction
,
24422 unsigned long value
)
24425 unsigned long negated
, inverted
;
24427 negated
= encode_arm_immediate (-value
);
24428 inverted
= encode_arm_immediate (~value
);
24430 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
24433 /* First negates. */
24434 case OPCODE_SUB
: /* ADD <-> SUB */
24435 new_inst
= OPCODE_ADD
;
24440 new_inst
= OPCODE_SUB
;
24444 case OPCODE_CMP
: /* CMP <-> CMN */
24445 new_inst
= OPCODE_CMN
;
24450 new_inst
= OPCODE_CMP
;
24454 /* Now Inverted ops. */
24455 case OPCODE_MOV
: /* MOV <-> MVN */
24456 new_inst
= OPCODE_MVN
;
24461 new_inst
= OPCODE_MOV
;
24465 case OPCODE_AND
: /* AND <-> BIC */
24466 new_inst
= OPCODE_BIC
;
24471 new_inst
= OPCODE_AND
;
24475 case OPCODE_ADC
: /* ADC <-> SBC */
24476 new_inst
= OPCODE_SBC
;
24481 new_inst
= OPCODE_ADC
;
24485 /* We cannot do anything. */
24490 if (value
== (unsigned) FAIL
)
24493 *instruction
&= OPCODE_MASK
;
24494 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
24498 /* Like negate_data_op, but for Thumb-2. */
24500 static unsigned int
24501 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
24505 unsigned int negated
, inverted
;
24507 negated
= encode_thumb32_immediate (-value
);
24508 inverted
= encode_thumb32_immediate (~value
);
24510 rd
= (*instruction
>> 8) & 0xf;
24511 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
24514 /* ADD <-> SUB. Includes CMP <-> CMN. */
24515 case T2_OPCODE_SUB
:
24516 new_inst
= T2_OPCODE_ADD
;
24520 case T2_OPCODE_ADD
:
24521 new_inst
= T2_OPCODE_SUB
;
24525 /* ORR <-> ORN. Includes MOV <-> MVN. */
24526 case T2_OPCODE_ORR
:
24527 new_inst
= T2_OPCODE_ORN
;
24531 case T2_OPCODE_ORN
:
24532 new_inst
= T2_OPCODE_ORR
;
24536 /* AND <-> BIC. TST has no inverted equivalent. */
24537 case T2_OPCODE_AND
:
24538 new_inst
= T2_OPCODE_BIC
;
24545 case T2_OPCODE_BIC
:
24546 new_inst
= T2_OPCODE_AND
;
24551 case T2_OPCODE_ADC
:
24552 new_inst
= T2_OPCODE_SBC
;
24556 case T2_OPCODE_SBC
:
24557 new_inst
= T2_OPCODE_ADC
;
24561 /* We cannot do anything. */
24566 if (value
== (unsigned int)FAIL
)
24569 *instruction
&= T2_OPCODE_MASK
;
24570 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
24574 /* Read a 32-bit thumb instruction from buf. */
24576 static unsigned long
24577 get_thumb32_insn (char * buf
)
24579 unsigned long insn
;
24580 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
24581 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24586 /* We usually want to set the low bit on the address of thumb function
24587 symbols. In particular .word foo - . should have the low bit set.
24588 Generic code tries to fold the difference of two symbols to
24589 a constant. Prevent this and force a relocation when the first symbols
24590 is a thumb function. */
24593 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
24595 if (op
== O_subtract
24596 && l
->X_op
== O_symbol
24597 && r
->X_op
== O_symbol
24598 && THUMB_IS_FUNC (l
->X_add_symbol
))
24600 l
->X_op
= O_subtract
;
24601 l
->X_op_symbol
= r
->X_add_symbol
;
24602 l
->X_add_number
-= r
->X_add_number
;
24606 /* Process as normal. */
24610 /* Encode Thumb2 unconditional branches and calls. The encoding
24611 for the 2 are identical for the immediate values. */
24614 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
24616 #define T2I1I2MASK ((1 << 13) | (1 << 11))
24619 addressT S
, I1
, I2
, lo
, hi
;
24621 S
= (value
>> 24) & 0x01;
24622 I1
= (value
>> 23) & 0x01;
24623 I2
= (value
>> 22) & 0x01;
24624 hi
= (value
>> 12) & 0x3ff;
24625 lo
= (value
>> 1) & 0x7ff;
24626 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24627 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24628 newval
|= (S
<< 10) | hi
;
24629 newval2
&= ~T2I1I2MASK
;
24630 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
24631 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24632 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24636 md_apply_fix (fixS
* fixP
,
24640 offsetT value
= * valP
;
24642 unsigned int newimm
;
24643 unsigned long temp
;
24645 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
24647 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
24649 /* Note whether this will delete the relocation. */
24651 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
24654 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24655 consistency with the behaviour on 32-bit hosts. Remember value
24657 value
&= 0xffffffff;
24658 value
^= 0x80000000;
24659 value
-= 0x80000000;
24662 fixP
->fx_addnumber
= value
;
24664 /* Same treatment for fixP->fx_offset. */
24665 fixP
->fx_offset
&= 0xffffffff;
24666 fixP
->fx_offset
^= 0x80000000;
24667 fixP
->fx_offset
-= 0x80000000;
24669 switch (fixP
->fx_r_type
)
24671 case BFD_RELOC_NONE
:
24672 /* This will need to go in the object file. */
24676 case BFD_RELOC_ARM_IMMEDIATE
:
24677 /* We claim that this fixup has been processed here,
24678 even if in fact we generate an error because we do
24679 not have a reloc for it, so tc_gen_reloc will reject it. */
24682 if (fixP
->fx_addsy
)
24684 const char *msg
= 0;
24686 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24687 msg
= _("undefined symbol %s used as an immediate value");
24688 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24689 msg
= _("symbol %s is in a different section");
24690 else if (S_IS_WEAK (fixP
->fx_addsy
))
24691 msg
= _("symbol %s is weak and may be overridden later");
24695 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24696 msg
, S_GET_NAME (fixP
->fx_addsy
));
24701 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24703 /* If the offset is negative, we should use encoding A2 for ADR. */
24704 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
24705 newimm
= negate_data_op (&temp
, value
);
24708 newimm
= encode_arm_immediate (value
);
24710 /* If the instruction will fail, see if we can fix things up by
24711 changing the opcode. */
24712 if (newimm
== (unsigned int) FAIL
)
24713 newimm
= negate_data_op (&temp
, value
);
24714 /* MOV accepts both ARM modified immediate (A1 encoding) and
24715 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
24716 When disassembling, MOV is preferred when there is no encoding
24718 if (newimm
== (unsigned int) FAIL
24719 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
24720 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
24721 && !((temp
>> SBIT_SHIFT
) & 0x1)
24722 && value
>= 0 && value
<= 0xffff)
24724 /* Clear bits[23:20] to change encoding from A1 to A2. */
24725 temp
&= 0xff0fffff;
24726 /* Encoding high 4bits imm. Code below will encode the remaining
24728 temp
|= (value
& 0x0000f000) << 4;
24729 newimm
= value
& 0x00000fff;
24733 if (newimm
== (unsigned int) FAIL
)
24735 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24736 _("invalid constant (%lx) after fixup"),
24737 (unsigned long) value
);
24741 newimm
|= (temp
& 0xfffff000);
24742 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24745 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24747 unsigned int highpart
= 0;
24748 unsigned int newinsn
= 0xe1a00000; /* nop. */
24750 if (fixP
->fx_addsy
)
24752 const char *msg
= 0;
24754 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24755 msg
= _("undefined symbol %s used as an immediate value");
24756 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24757 msg
= _("symbol %s is in a different section");
24758 else if (S_IS_WEAK (fixP
->fx_addsy
))
24759 msg
= _("symbol %s is weak and may be overridden later");
24763 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24764 msg
, S_GET_NAME (fixP
->fx_addsy
));
24769 newimm
= encode_arm_immediate (value
);
24770 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24772 /* If the instruction will fail, see if we can fix things up by
24773 changing the opcode. */
24774 if (newimm
== (unsigned int) FAIL
24775 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
24777 /* No ? OK - try using two ADD instructions to generate
24779 newimm
= validate_immediate_twopart (value
, & highpart
);
24781 /* Yes - then make sure that the second instruction is
24783 if (newimm
!= (unsigned int) FAIL
)
24785 /* Still No ? Try using a negated value. */
24786 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
24787 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
24788 /* Otherwise - give up. */
24791 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24792 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
24797 /* Replace the first operand in the 2nd instruction (which
24798 is the PC) with the destination register. We have
24799 already added in the PC in the first instruction and we
24800 do not want to do it again. */
24801 newinsn
&= ~ 0xf0000;
24802 newinsn
|= ((newinsn
& 0x0f000) << 4);
24805 newimm
|= (temp
& 0xfffff000);
24806 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24808 highpart
|= (newinsn
& 0xfffff000);
24809 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
24813 case BFD_RELOC_ARM_OFFSET_IMM
:
24814 if (!fixP
->fx_done
&& seg
->use_rela_p
)
24816 /* Fall through. */
24818 case BFD_RELOC_ARM_LITERAL
:
24824 if (validate_offset_imm (value
, 0) == FAIL
)
24826 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
24827 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24828 _("invalid literal constant: pool needs to be closer"));
24830 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24831 _("bad immediate value for offset (%ld)"),
24836 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24838 newval
&= 0xfffff000;
24841 newval
&= 0xff7ff000;
24842 newval
|= value
| (sign
? INDEX_UP
: 0);
24844 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24847 case BFD_RELOC_ARM_OFFSET_IMM8
:
24848 case BFD_RELOC_ARM_HWLITERAL
:
24854 if (validate_offset_imm (value
, 1) == FAIL
)
24856 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
24857 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24858 _("invalid literal constant: pool needs to be closer"));
24860 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24861 _("bad immediate value for 8-bit offset (%ld)"),
24866 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24868 newval
&= 0xfffff0f0;
24871 newval
&= 0xff7ff0f0;
24872 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
24874 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24877 case BFD_RELOC_ARM_T32_OFFSET_U8
:
24878 if (value
< 0 || value
> 1020 || value
% 4 != 0)
24879 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24880 _("bad immediate value for offset (%ld)"), (long) value
);
24883 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
24885 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
24888 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24889 /* This is a complicated relocation used for all varieties of Thumb32
24890 load/store instruction with immediate offset:
24892 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24893 *4, optional writeback(W)
24894 (doubleword load/store)
24896 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24897 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24898 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24899 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24900 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24902 Uppercase letters indicate bits that are already encoded at
24903 this point. Lowercase letters are our problem. For the
24904 second block of instructions, the secondary opcode nybble
24905 (bits 8..11) is present, and bit 23 is zero, even if this is
24906 a PC-relative operation. */
24907 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24909 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24911 if ((newval
& 0xf0000000) == 0xe0000000)
24913 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24915 newval
|= (1 << 23);
24918 if (value
% 4 != 0)
24920 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24921 _("offset not a multiple of 4"));
24927 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24928 _("offset out of range"));
24933 else if ((newval
& 0x000f0000) == 0x000f0000)
24935 /* PC-relative, 12-bit offset. */
24937 newval
|= (1 << 23);
24942 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24943 _("offset out of range"));
24948 else if ((newval
& 0x00000100) == 0x00000100)
24950 /* Writeback: 8-bit, +/- offset. */
24952 newval
|= (1 << 9);
24957 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24958 _("offset out of range"));
24963 else if ((newval
& 0x00000f00) == 0x00000e00)
24965 /* T-instruction: positive 8-bit offset. */
24966 if (value
< 0 || value
> 0xff)
24968 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24969 _("offset out of range"));
24977 /* Positive 12-bit or negative 8-bit offset. */
24981 newval
|= (1 << 23);
24991 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24992 _("offset out of range"));
24999 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
25000 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
25003 case BFD_RELOC_ARM_SHIFT_IMM
:
25004 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25005 if (((unsigned long) value
) > 32
25007 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
25009 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25010 _("shift expression is too large"));
25015 /* Shifts of zero must be done as lsl. */
25017 else if (value
== 32)
25019 newval
&= 0xfffff07f;
25020 newval
|= (value
& 0x1f) << 7;
25021 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25024 case BFD_RELOC_ARM_T32_IMMEDIATE
:
25025 case BFD_RELOC_ARM_T32_ADD_IMM
:
25026 case BFD_RELOC_ARM_T32_IMM12
:
25027 case BFD_RELOC_ARM_T32_ADD_PC12
:
25028 /* We claim that this fixup has been processed here,
25029 even if in fact we generate an error because we do
25030 not have a reloc for it, so tc_gen_reloc will reject it. */
25034 && ! S_IS_DEFINED (fixP
->fx_addsy
))
25036 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25037 _("undefined symbol %s used as an immediate value"),
25038 S_GET_NAME (fixP
->fx_addsy
));
25042 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25044 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
25047 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25048 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25049 Thumb2 modified immediate encoding (T2). */
25050 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
25051 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25053 newimm
= encode_thumb32_immediate (value
);
25054 if (newimm
== (unsigned int) FAIL
)
25055 newimm
= thumb32_negate_data_op (&newval
, value
);
25057 if (newimm
== (unsigned int) FAIL
)
25059 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
25061 /* Turn add/sum into addw/subw. */
25062 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25063 newval
= (newval
& 0xfeffffff) | 0x02000000;
25064 /* No flat 12-bit imm encoding for addsw/subsw. */
25065 if ((newval
& 0x00100000) == 0)
25067 /* 12 bit immediate for addw/subw. */
25071 newval
^= 0x00a00000;
25074 newimm
= (unsigned int) FAIL
;
25081 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25082 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25083 disassembling, MOV is preferred when there is no encoding
25085 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
25086 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25087 but with the Rn field [19:16] set to 1111. */
25088 && (((newval
>> 16) & 0xf) == 0xf)
25089 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
25090 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
25091 && value
>= 0 && value
<= 0xffff)
25093 /* Toggle bit[25] to change encoding from T2 to T3. */
25095 /* Clear bits[19:16]. */
25096 newval
&= 0xfff0ffff;
25097 /* Encoding high 4bits imm. Code below will encode the
25098 remaining low 12bits. */
25099 newval
|= (value
& 0x0000f000) << 4;
25100 newimm
= value
& 0x00000fff;
25105 if (newimm
== (unsigned int)FAIL
)
25107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25108 _("invalid constant (%lx) after fixup"),
25109 (unsigned long) value
);
25113 newval
|= (newimm
& 0x800) << 15;
25114 newval
|= (newimm
& 0x700) << 4;
25115 newval
|= (newimm
& 0x0ff);
25117 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
25118 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
25121 case BFD_RELOC_ARM_SMC
:
25122 if (((unsigned long) value
) > 0xffff)
25123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25124 _("invalid smc expression"));
25125 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25126 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25127 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25130 case BFD_RELOC_ARM_HVC
:
25131 if (((unsigned long) value
) > 0xffff)
25132 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25133 _("invalid hvc expression"));
25134 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25135 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25136 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25139 case BFD_RELOC_ARM_SWI
:
25140 if (fixP
->tc_fix_data
!= 0)
25142 if (((unsigned long) value
) > 0xff)
25143 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25144 _("invalid swi expression"));
25145 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25147 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25151 if (((unsigned long) value
) > 0x00ffffff)
25152 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25153 _("invalid swi expression"));
25154 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25156 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25160 case BFD_RELOC_ARM_MULTI
:
25161 if (((unsigned long) value
) > 0xffff)
25162 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25163 _("invalid expression in load/store multiple"));
25164 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
25165 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25169 case BFD_RELOC_ARM_PCREL_CALL
:
25171 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25173 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25174 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25175 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25176 /* Flip the bl to blx. This is a simple flip
25177 bit here because we generate PCREL_CALL for
25178 unconditional bls. */
25180 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25181 newval
= newval
| 0x10000000;
25182 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25188 goto arm_branch_common
;
25190 case BFD_RELOC_ARM_PCREL_JUMP
:
25191 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25193 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25194 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25195 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25197 /* This would map to a bl<cond>, b<cond>,
25198 b<always> to a Thumb function. We
25199 need to force a relocation for this particular
25201 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25204 /* Fall through. */
25206 case BFD_RELOC_ARM_PLT32
:
25208 case BFD_RELOC_ARM_PCREL_BRANCH
:
25210 goto arm_branch_common
;
25212 case BFD_RELOC_ARM_PCREL_BLX
:
25215 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25217 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25218 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25219 && ARM_IS_FUNC (fixP
->fx_addsy
))
25221 /* Flip the blx to a bl and warn. */
25222 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25223 newval
= 0xeb000000;
25224 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25225 _("blx to '%s' an ARM ISA state function changed to bl"),
25227 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25233 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25234 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
25238 /* We are going to store value (shifted right by two) in the
25239 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25240 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25243 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25244 _("misaligned branch destination"));
25245 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
25246 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
25247 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25249 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25251 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25252 newval
|= (value
>> 2) & 0x00ffffff;
25253 /* Set the H bit on BLX instructions. */
25257 newval
|= 0x01000000;
25259 newval
&= ~0x01000000;
25261 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25265 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
25266 /* CBZ can only branch forward. */
25268 /* Attempts to use CBZ to branch to the next instruction
25269 (which, strictly speaking, are prohibited) will be turned into
25272 FIXME: It may be better to remove the instruction completely and
25273 perform relaxation. */
25276 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25277 newval
= 0xbf00; /* NOP encoding T1 */
25278 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25283 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25285 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25287 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25288 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
25289 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25294 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
25295 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
25296 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25298 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25300 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25301 newval
|= (value
& 0x1ff) >> 1;
25302 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25306 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
25307 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
25308 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25310 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25312 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25313 newval
|= (value
& 0xfff) >> 1;
25314 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25318 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25320 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25321 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25322 && ARM_IS_FUNC (fixP
->fx_addsy
)
25323 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25325 /* Force a relocation for a branch 20 bits wide. */
25328 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
25329 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25330 _("conditional branch out of range"));
25332 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25335 addressT S
, J1
, J2
, lo
, hi
;
25337 S
= (value
& 0x00100000) >> 20;
25338 J2
= (value
& 0x00080000) >> 19;
25339 J1
= (value
& 0x00040000) >> 18;
25340 hi
= (value
& 0x0003f000) >> 12;
25341 lo
= (value
& 0x00000ffe) >> 1;
25343 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25344 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25345 newval
|= (S
<< 10) | hi
;
25346 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
25347 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25348 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25352 case BFD_RELOC_THUMB_PCREL_BLX
:
25353 /* If there is a blx from a thumb state function to
25354 another thumb function flip this to a bl and warn
25358 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25359 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25360 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25362 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25363 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25364 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25366 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25367 newval
= newval
| 0x1000;
25368 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25369 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25374 goto thumb_bl_common
;
25376 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25377 /* A bl from Thumb state ISA to an internal ARM state function
25378 is converted to a blx. */
25380 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25381 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25382 && ARM_IS_FUNC (fixP
->fx_addsy
)
25383 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25385 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25386 newval
= newval
& ~0x1000;
25387 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25388 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
25394 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25395 /* For a BLX instruction, make sure that the relocation is rounded up
25396 to a word boundary. This follows the semantics of the instruction
25397 which specifies that bit 1 of the target address will come from bit
25398 1 of the base address. */
25399 value
= (value
+ 3) & ~ 3;
25402 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
25403 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25404 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25407 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
25409 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
25410 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25411 else if ((value
& ~0x1ffffff)
25412 && ((value
& ~0x1ffffff) != ~0x1ffffff))
25413 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25414 _("Thumb2 branch out of range"));
25417 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25418 encode_thumb2_b_bl_offset (buf
, value
);
25422 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25423 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
25424 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25426 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25427 encode_thumb2_b_bl_offset (buf
, value
);
25432 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25437 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25438 md_number_to_chars (buf
, value
, 2);
25442 case BFD_RELOC_ARM_TLS_CALL
:
25443 case BFD_RELOC_ARM_THM_TLS_CALL
:
25444 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25445 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25446 case BFD_RELOC_ARM_TLS_GOTDESC
:
25447 case BFD_RELOC_ARM_TLS_GD32
:
25448 case BFD_RELOC_ARM_TLS_LE32
:
25449 case BFD_RELOC_ARM_TLS_IE32
:
25450 case BFD_RELOC_ARM_TLS_LDM32
:
25451 case BFD_RELOC_ARM_TLS_LDO32
:
25452 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25455 /* Same handling as above, but with the arm_fdpic guard. */
25456 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25457 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25458 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25461 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25465 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25466 _("Relocation supported only in FDPIC mode"));
25470 case BFD_RELOC_ARM_GOT32
:
25471 case BFD_RELOC_ARM_GOTOFF
:
25474 case BFD_RELOC_ARM_GOT_PREL
:
25475 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25476 md_number_to_chars (buf
, value
, 4);
25479 case BFD_RELOC_ARM_TARGET2
:
25480 /* TARGET2 is not partial-inplace, so we need to write the
25481 addend here for REL targets, because it won't be written out
25482 during reloc processing later. */
25483 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25484 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
25487 /* Relocations for FDPIC. */
25488 case BFD_RELOC_ARM_GOTFUNCDESC
:
25489 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25490 case BFD_RELOC_ARM_FUNCDESC
:
25493 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25494 md_number_to_chars (buf
, 0, 4);
25498 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25499 _("Relocation supported only in FDPIC mode"));
25504 case BFD_RELOC_RVA
:
25506 case BFD_RELOC_ARM_TARGET1
:
25507 case BFD_RELOC_ARM_ROSEGREL32
:
25508 case BFD_RELOC_ARM_SBREL32
:
25509 case BFD_RELOC_32_PCREL
:
25511 case BFD_RELOC_32_SECREL
:
25513 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25515 /* For WinCE we only do this for pcrel fixups. */
25516 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
25518 md_number_to_chars (buf
, value
, 4);
25522 case BFD_RELOC_ARM_PREL31
:
25523 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25525 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
25526 if ((value
^ (value
>> 1)) & 0x40000000)
25528 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25529 _("rel31 relocation overflow"));
25531 newval
|= value
& 0x7fffffff;
25532 md_number_to_chars (buf
, newval
, 4);
25537 case BFD_RELOC_ARM_CP_OFF_IMM
:
25538 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
25539 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
25540 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
25541 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25543 newval
= get_thumb32_insn (buf
);
25544 if ((newval
& 0x0f200f00) == 0x0d000900)
25546 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25547 has permitted values that are multiples of 2, in the range 0
25549 if (value
< -510 || value
> 510 || (value
& 1))
25550 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25551 _("co-processor offset out of range"));
25553 else if ((newval
& 0xfe001f80) == 0xec000f80)
25555 if (value
< -511 || value
> 512 || (value
& 3))
25556 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25557 _("co-processor offset out of range"));
25559 else if (value
< -1023 || value
> 1023 || (value
& 3))
25560 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25561 _("co-processor offset out of range"));
25566 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25567 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25568 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25570 newval
= get_thumb32_insn (buf
);
25573 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25574 newval
&= 0xffffff80;
25576 newval
&= 0xffffff00;
25580 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25581 newval
&= 0xff7fff80;
25583 newval
&= 0xff7fff00;
25584 if ((newval
& 0x0f200f00) == 0x0d000900)
25586 /* This is a fp16 vstr/vldr.
25588 It requires the immediate offset in the instruction is shifted
25589 left by 1 to be a half-word offset.
25591 Here, left shift by 1 first, and later right shift by 2
25592 should get the right offset. */
25595 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
25597 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25598 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25599 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25601 put_thumb32_insn (buf
, newval
);
25604 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
25605 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
25606 if (value
< -255 || value
> 255)
25607 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25608 _("co-processor offset out of range"));
25610 goto cp_off_common
;
25612 case BFD_RELOC_ARM_THUMB_OFFSET
:
25613 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25614 /* Exactly what ranges, and where the offset is inserted depends
25615 on the type of instruction, we can establish this from the
25617 switch (newval
>> 12)
25619 case 4: /* PC load. */
25620 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25621 forced to zero for these loads; md_pcrel_from has already
25622 compensated for this. */
25624 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25625 _("invalid offset, target not word aligned (0x%08lX)"),
25626 (((unsigned long) fixP
->fx_frag
->fr_address
25627 + (unsigned long) fixP
->fx_where
) & ~3)
25628 + (unsigned long) value
);
25630 if (value
& ~0x3fc)
25631 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25632 _("invalid offset, value too big (0x%08lX)"),
25635 newval
|= value
>> 2;
25638 case 9: /* SP load/store. */
25639 if (value
& ~0x3fc)
25640 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25641 _("invalid offset, value too big (0x%08lX)"),
25643 newval
|= value
>> 2;
25646 case 6: /* Word load/store. */
25648 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25649 _("invalid offset, value too big (0x%08lX)"),
25651 newval
|= value
<< 4; /* 6 - 2. */
25654 case 7: /* Byte load/store. */
25656 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25657 _("invalid offset, value too big (0x%08lX)"),
25659 newval
|= value
<< 6;
25662 case 8: /* Halfword load/store. */
25664 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25665 _("invalid offset, value too big (0x%08lX)"),
25667 newval
|= value
<< 5; /* 6 - 1. */
25671 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25672 "Unable to process relocation for thumb opcode: %lx",
25673 (unsigned long) newval
);
25676 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25679 case BFD_RELOC_ARM_THUMB_ADD
:
25680 /* This is a complicated relocation, since we use it for all of
25681 the following immediate relocations:
25685 9bit ADD/SUB SP word-aligned
25686 10bit ADD PC/SP word-aligned
25688 The type of instruction being processed is encoded in the
25695 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25697 int rd
= (newval
>> 4) & 0xf;
25698 int rs
= newval
& 0xf;
25699 int subtract
= !!(newval
& 0x8000);
25701 /* Check for HI regs, only very restricted cases allowed:
25702 Adjusting SP, and using PC or SP to get an address. */
25703 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
25704 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
25705 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25706 _("invalid Hi register with immediate"));
25708 /* If value is negative, choose the opposite instruction. */
25712 subtract
= !subtract
;
25714 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25715 _("immediate value out of range"));
25720 if (value
& ~0x1fc)
25721 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25722 _("invalid immediate for stack address calculation"));
25723 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
25724 newval
|= value
>> 2;
25726 else if (rs
== REG_PC
|| rs
== REG_SP
)
25728 /* PR gas/18541. If the addition is for a defined symbol
25729 within range of an ADR instruction then accept it. */
25732 && fixP
->fx_addsy
!= NULL
)
25736 if (! S_IS_DEFINED (fixP
->fx_addsy
)
25737 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
25738 || S_IS_WEAK (fixP
->fx_addsy
))
25740 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25741 _("address calculation needs a strongly defined nearby symbol"));
25745 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
25747 /* Round up to the next 4-byte boundary. */
25752 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
25756 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25757 _("symbol too far away"));
25767 if (subtract
|| value
& ~0x3fc)
25768 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25769 _("invalid immediate for address calculation (value = 0x%08lX)"),
25770 (unsigned long) (subtract
? - value
: value
));
25771 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
25773 newval
|= value
>> 2;
25778 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25779 _("immediate value out of range"));
25780 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
25781 newval
|= (rd
<< 8) | value
;
25786 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25787 _("immediate value out of range"));
25788 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
25789 newval
|= rd
| (rs
<< 3) | (value
<< 6);
25792 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25795 case BFD_RELOC_ARM_THUMB_IMM
:
25796 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25797 if (value
< 0 || value
> 255)
25798 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25799 _("invalid immediate: %ld is out of range"),
25802 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25805 case BFD_RELOC_ARM_THUMB_SHIFT
:
25806 /* 5bit shift value (0..32). LSL cannot take 32. */
25807 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
25808 temp
= newval
& 0xf800;
25809 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
25810 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25811 _("invalid shift value: %ld"), (long) value
);
25812 /* Shifts of zero must be encoded as LSL. */
25814 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
25815 /* Shifts of 32 are encoded as zero. */
25816 else if (value
== 32)
25818 newval
|= value
<< 6;
25819 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25822 case BFD_RELOC_VTABLE_INHERIT
:
25823 case BFD_RELOC_VTABLE_ENTRY
:
25827 case BFD_RELOC_ARM_MOVW
:
25828 case BFD_RELOC_ARM_MOVT
:
25829 case BFD_RELOC_ARM_THUMB_MOVW
:
25830 case BFD_RELOC_ARM_THUMB_MOVT
:
25831 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25833 /* REL format relocations are limited to a 16-bit addend. */
25834 if (!fixP
->fx_done
)
25836 if (value
< -0x8000 || value
> 0x7fff)
25837 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25838 _("offset out of range"));
25840 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25841 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25846 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25847 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25849 newval
= get_thumb32_insn (buf
);
25850 newval
&= 0xfbf08f00;
25851 newval
|= (value
& 0xf000) << 4;
25852 newval
|= (value
& 0x0800) << 15;
25853 newval
|= (value
& 0x0700) << 4;
25854 newval
|= (value
& 0x00ff);
25855 put_thumb32_insn (buf
, newval
);
25859 newval
= md_chars_to_number (buf
, 4);
25860 newval
&= 0xfff0f000;
25861 newval
|= value
& 0x0fff;
25862 newval
|= (value
& 0xf000) << 4;
25863 md_number_to_chars (buf
, newval
, 4);
25868 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25869 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25870 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25871 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25872 gas_assert (!fixP
->fx_done
);
25875 bfd_boolean is_mov
;
25876 bfd_vma encoded_addend
= value
;
25878 /* Check that addend can be encoded in instruction. */
25879 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
25880 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25881 _("the offset 0x%08lX is not representable"),
25882 (unsigned long) encoded_addend
);
25884 /* Extract the instruction. */
25885 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
25886 is_mov
= (insn
& 0xf800) == 0x2000;
25891 if (!seg
->use_rela_p
)
25892 insn
|= encoded_addend
;
25898 /* Extract the instruction. */
25899 /* Encoding is the following
25904 /* The following conditions must be true :
25909 rd
= (insn
>> 4) & 0xf;
25911 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25912 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25913 _("Unable to process relocation for thumb opcode: %lx"),
25914 (unsigned long) insn
);
25916 /* Encode as ADD immediate8 thumb 1 code. */
25917 insn
= 0x3000 | (rd
<< 8);
25919 /* Place the encoded addend into the first 8 bits of the
25921 if (!seg
->use_rela_p
)
25922 insn
|= encoded_addend
;
25925 /* Update the instruction. */
25926 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25930 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25931 case BFD_RELOC_ARM_ALU_PC_G0
:
25932 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25933 case BFD_RELOC_ARM_ALU_PC_G1
:
25934 case BFD_RELOC_ARM_ALU_PC_G2
:
25935 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25936 case BFD_RELOC_ARM_ALU_SB_G0
:
25937 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25938 case BFD_RELOC_ARM_ALU_SB_G1
:
25939 case BFD_RELOC_ARM_ALU_SB_G2
:
25940 gas_assert (!fixP
->fx_done
);
25941 if (!seg
->use_rela_p
)
25944 bfd_vma encoded_addend
;
25945 bfd_vma addend_abs
= llabs (value
);
25947 /* Check that the absolute value of the addend can be
25948 expressed as an 8-bit constant plus a rotation. */
25949 encoded_addend
= encode_arm_immediate (addend_abs
);
25950 if (encoded_addend
== (unsigned int) FAIL
)
25951 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25952 _("the offset 0x%08lX is not representable"),
25953 (unsigned long) addend_abs
);
25955 /* Extract the instruction. */
25956 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25958 /* If the addend is positive, use an ADD instruction.
25959 Otherwise use a SUB. Take care not to destroy the S bit. */
25960 insn
&= 0xff1fffff;
25966 /* Place the encoded addend into the first 12 bits of the
25968 insn
&= 0xfffff000;
25969 insn
|= encoded_addend
;
25971 /* Update the instruction. */
25972 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25976 case BFD_RELOC_ARM_LDR_PC_G0
:
25977 case BFD_RELOC_ARM_LDR_PC_G1
:
25978 case BFD_RELOC_ARM_LDR_PC_G2
:
25979 case BFD_RELOC_ARM_LDR_SB_G0
:
25980 case BFD_RELOC_ARM_LDR_SB_G1
:
25981 case BFD_RELOC_ARM_LDR_SB_G2
:
25982 gas_assert (!fixP
->fx_done
);
25983 if (!seg
->use_rela_p
)
25986 bfd_vma addend_abs
= llabs (value
);
25988 /* Check that the absolute value of the addend can be
25989 encoded in 12 bits. */
25990 if (addend_abs
>= 0x1000)
25991 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25992 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25993 (unsigned long) addend_abs
);
25995 /* Extract the instruction. */
25996 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25998 /* If the addend is negative, clear bit 23 of the instruction.
25999 Otherwise set it. */
26001 insn
&= ~(1 << 23);
26005 /* Place the absolute value of the addend into the first 12 bits
26006 of the instruction. */
26007 insn
&= 0xfffff000;
26008 insn
|= addend_abs
;
26010 /* Update the instruction. */
26011 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26015 case BFD_RELOC_ARM_LDRS_PC_G0
:
26016 case BFD_RELOC_ARM_LDRS_PC_G1
:
26017 case BFD_RELOC_ARM_LDRS_PC_G2
:
26018 case BFD_RELOC_ARM_LDRS_SB_G0
:
26019 case BFD_RELOC_ARM_LDRS_SB_G1
:
26020 case BFD_RELOC_ARM_LDRS_SB_G2
:
26021 gas_assert (!fixP
->fx_done
);
26022 if (!seg
->use_rela_p
)
26025 bfd_vma addend_abs
= llabs (value
);
26027 /* Check that the absolute value of the addend can be
26028 encoded in 8 bits. */
26029 if (addend_abs
>= 0x100)
26030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26031 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26032 (unsigned long) addend_abs
);
26034 /* Extract the instruction. */
26035 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26037 /* If the addend is negative, clear bit 23 of the instruction.
26038 Otherwise set it. */
26040 insn
&= ~(1 << 23);
26044 /* Place the first four bits of the absolute value of the addend
26045 into the first 4 bits of the instruction, and the remaining
26046 four into bits 8 .. 11. */
26047 insn
&= 0xfffff0f0;
26048 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
26050 /* Update the instruction. */
26051 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26055 case BFD_RELOC_ARM_LDC_PC_G0
:
26056 case BFD_RELOC_ARM_LDC_PC_G1
:
26057 case BFD_RELOC_ARM_LDC_PC_G2
:
26058 case BFD_RELOC_ARM_LDC_SB_G0
:
26059 case BFD_RELOC_ARM_LDC_SB_G1
:
26060 case BFD_RELOC_ARM_LDC_SB_G2
:
26061 gas_assert (!fixP
->fx_done
);
26062 if (!seg
->use_rela_p
)
26065 bfd_vma addend_abs
= llabs (value
);
26067 /* Check that the absolute value of the addend is a multiple of
26068 four and, when divided by four, fits in 8 bits. */
26069 if (addend_abs
& 0x3)
26070 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26071 _("bad offset 0x%08lX (must be word-aligned)"),
26072 (unsigned long) addend_abs
);
26074 if ((addend_abs
>> 2) > 0xff)
26075 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26076 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26077 (unsigned long) addend_abs
);
26079 /* Extract the instruction. */
26080 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26082 /* If the addend is negative, clear bit 23 of the instruction.
26083 Otherwise set it. */
26085 insn
&= ~(1 << 23);
26089 /* Place the addend (divided by four) into the first eight
26090 bits of the instruction. */
26091 insn
&= 0xfffffff0;
26092 insn
|= addend_abs
>> 2;
26094 /* Update the instruction. */
26095 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26099 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26101 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26102 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26103 && ARM_IS_FUNC (fixP
->fx_addsy
)
26104 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26106 /* Force a relocation for a branch 5 bits wide. */
26109 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
26110 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26113 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26115 addressT boff
= value
>> 1;
26117 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26118 newval
|= (boff
<< 7);
26119 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26123 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26125 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26126 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26127 && ARM_IS_FUNC (fixP
->fx_addsy
)
26128 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26132 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
26133 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26134 _("branch out of range"));
26136 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26138 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26140 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
26141 addressT diff
= value
- boff
;
26145 newval
|= 1 << 1; /* T bit. */
26147 else if (diff
!= 2)
26149 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26150 _("out of range label-relative fixup value"));
26152 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26156 case BFD_RELOC_ARM_THUMB_BF17
:
26158 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26159 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26160 && ARM_IS_FUNC (fixP
->fx_addsy
)
26161 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26163 /* Force a relocation for a branch 17 bits wide. */
26167 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
26168 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26171 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26174 addressT immA
, immB
, immC
;
26176 immA
= (value
& 0x0001f000) >> 12;
26177 immB
= (value
& 0x00000ffc) >> 2;
26178 immC
= (value
& 0x00000002) >> 1;
26180 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26181 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26183 newval2
|= (immC
<< 11) | (immB
<< 1);
26184 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26185 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26189 case BFD_RELOC_ARM_THUMB_BF19
:
26191 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26192 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26193 && ARM_IS_FUNC (fixP
->fx_addsy
)
26194 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26196 /* Force a relocation for a branch 19 bits wide. */
26200 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
26201 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26204 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26207 addressT immA
, immB
, immC
;
26209 immA
= (value
& 0x0007f000) >> 12;
26210 immB
= (value
& 0x00000ffc) >> 2;
26211 immC
= (value
& 0x00000002) >> 1;
26213 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26214 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26216 newval2
|= (immC
<< 11) | (immB
<< 1);
26217 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26218 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26222 case BFD_RELOC_ARM_THUMB_BF13
:
26224 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26225 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26226 && ARM_IS_FUNC (fixP
->fx_addsy
)
26227 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26229 /* Force a relocation for a branch 13 bits wide. */
26233 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
26234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26237 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26240 addressT immA
, immB
, immC
;
26242 immA
= (value
& 0x00001000) >> 12;
26243 immB
= (value
& 0x00000ffc) >> 2;
26244 immC
= (value
& 0x00000002) >> 1;
26246 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26247 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26249 newval2
|= (immC
<< 11) | (immB
<< 1);
26250 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26251 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26255 case BFD_RELOC_ARM_THUMB_LOOP12
:
26257 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26258 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26259 && ARM_IS_FUNC (fixP
->fx_addsy
)
26260 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26262 /* Force a relocation for a branch 12 bits wide. */
26266 bfd_vma insn
= get_thumb32_insn (buf
);
26267 /* le lr, <label> or le <label> */
26268 if (((insn
& 0xffffffff) == 0xf00fc001)
26269 || ((insn
& 0xffffffff) == 0xf02fc001))
26272 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
26273 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26275 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26277 addressT imml
, immh
;
26279 immh
= (value
& 0x00000ffc) >> 2;
26280 imml
= (value
& 0x00000002) >> 1;
26282 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26283 newval
|= (imml
<< 11) | (immh
<< 1);
26284 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
26288 case BFD_RELOC_ARM_V4BX
:
26289 /* This will need to go in the object file. */
26293 case BFD_RELOC_UNUSED
:
26295 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26296 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
26300 /* Translate internal representation of relocation info to BFD target
26304 tc_gen_reloc (asection
*section
, fixS
*fixp
)
26307 bfd_reloc_code_real_type code
;
26309 reloc
= XNEW (arelent
);
26311 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
26312 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
26313 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
26315 if (fixp
->fx_pcrel
)
26317 if (section
->use_rela_p
)
26318 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
26320 fixp
->fx_offset
= reloc
->address
;
26322 reloc
->addend
= fixp
->fx_offset
;
26324 switch (fixp
->fx_r_type
)
26327 if (fixp
->fx_pcrel
)
26329 code
= BFD_RELOC_8_PCREL
;
26332 /* Fall through. */
26335 if (fixp
->fx_pcrel
)
26337 code
= BFD_RELOC_16_PCREL
;
26340 /* Fall through. */
26343 if (fixp
->fx_pcrel
)
26345 code
= BFD_RELOC_32_PCREL
;
26348 /* Fall through. */
26350 case BFD_RELOC_ARM_MOVW
:
26351 if (fixp
->fx_pcrel
)
26353 code
= BFD_RELOC_ARM_MOVW_PCREL
;
26356 /* Fall through. */
26358 case BFD_RELOC_ARM_MOVT
:
26359 if (fixp
->fx_pcrel
)
26361 code
= BFD_RELOC_ARM_MOVT_PCREL
;
26364 /* Fall through. */
26366 case BFD_RELOC_ARM_THUMB_MOVW
:
26367 if (fixp
->fx_pcrel
)
26369 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
26372 /* Fall through. */
26374 case BFD_RELOC_ARM_THUMB_MOVT
:
26375 if (fixp
->fx_pcrel
)
26377 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
26380 /* Fall through. */
26382 case BFD_RELOC_NONE
:
26383 case BFD_RELOC_ARM_PCREL_BRANCH
:
26384 case BFD_RELOC_ARM_PCREL_BLX
:
26385 case BFD_RELOC_RVA
:
26386 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
26387 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
26388 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
26389 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26390 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26391 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26392 case BFD_RELOC_VTABLE_ENTRY
:
26393 case BFD_RELOC_VTABLE_INHERIT
:
26395 case BFD_RELOC_32_SECREL
:
26397 code
= fixp
->fx_r_type
;
26400 case BFD_RELOC_THUMB_PCREL_BLX
:
26402 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
26403 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
26406 code
= BFD_RELOC_THUMB_PCREL_BLX
;
26409 case BFD_RELOC_ARM_LITERAL
:
26410 case BFD_RELOC_ARM_HWLITERAL
:
26411 /* If this is called then the a literal has
26412 been referenced across a section boundary. */
26413 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26414 _("literal referenced across section boundary"));
26418 case BFD_RELOC_ARM_TLS_CALL
:
26419 case BFD_RELOC_ARM_THM_TLS_CALL
:
26420 case BFD_RELOC_ARM_TLS_DESCSEQ
:
26421 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
26422 case BFD_RELOC_ARM_GOT32
:
26423 case BFD_RELOC_ARM_GOTOFF
:
26424 case BFD_RELOC_ARM_GOT_PREL
:
26425 case BFD_RELOC_ARM_PLT32
:
26426 case BFD_RELOC_ARM_TARGET1
:
26427 case BFD_RELOC_ARM_ROSEGREL32
:
26428 case BFD_RELOC_ARM_SBREL32
:
26429 case BFD_RELOC_ARM_PREL31
:
26430 case BFD_RELOC_ARM_TARGET2
:
26431 case BFD_RELOC_ARM_TLS_LDO32
:
26432 case BFD_RELOC_ARM_PCREL_CALL
:
26433 case BFD_RELOC_ARM_PCREL_JUMP
:
26434 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26435 case BFD_RELOC_ARM_ALU_PC_G0
:
26436 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26437 case BFD_RELOC_ARM_ALU_PC_G1
:
26438 case BFD_RELOC_ARM_ALU_PC_G2
:
26439 case BFD_RELOC_ARM_LDR_PC_G0
:
26440 case BFD_RELOC_ARM_LDR_PC_G1
:
26441 case BFD_RELOC_ARM_LDR_PC_G2
:
26442 case BFD_RELOC_ARM_LDRS_PC_G0
:
26443 case BFD_RELOC_ARM_LDRS_PC_G1
:
26444 case BFD_RELOC_ARM_LDRS_PC_G2
:
26445 case BFD_RELOC_ARM_LDC_PC_G0
:
26446 case BFD_RELOC_ARM_LDC_PC_G1
:
26447 case BFD_RELOC_ARM_LDC_PC_G2
:
26448 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26449 case BFD_RELOC_ARM_ALU_SB_G0
:
26450 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26451 case BFD_RELOC_ARM_ALU_SB_G1
:
26452 case BFD_RELOC_ARM_ALU_SB_G2
:
26453 case BFD_RELOC_ARM_LDR_SB_G0
:
26454 case BFD_RELOC_ARM_LDR_SB_G1
:
26455 case BFD_RELOC_ARM_LDR_SB_G2
:
26456 case BFD_RELOC_ARM_LDRS_SB_G0
:
26457 case BFD_RELOC_ARM_LDRS_SB_G1
:
26458 case BFD_RELOC_ARM_LDRS_SB_G2
:
26459 case BFD_RELOC_ARM_LDC_SB_G0
:
26460 case BFD_RELOC_ARM_LDC_SB_G1
:
26461 case BFD_RELOC_ARM_LDC_SB_G2
:
26462 case BFD_RELOC_ARM_V4BX
:
26463 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26464 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26465 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26466 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26467 case BFD_RELOC_ARM_GOTFUNCDESC
:
26468 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
26469 case BFD_RELOC_ARM_FUNCDESC
:
26470 case BFD_RELOC_ARM_THUMB_BF17
:
26471 case BFD_RELOC_ARM_THUMB_BF19
:
26472 case BFD_RELOC_ARM_THUMB_BF13
:
26473 code
= fixp
->fx_r_type
;
26476 case BFD_RELOC_ARM_TLS_GOTDESC
:
26477 case BFD_RELOC_ARM_TLS_GD32
:
26478 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
26479 case BFD_RELOC_ARM_TLS_LE32
:
26480 case BFD_RELOC_ARM_TLS_IE32
:
26481 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
26482 case BFD_RELOC_ARM_TLS_LDM32
:
26483 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
26484 /* BFD will include the symbol's address in the addend.
26485 But we don't want that, so subtract it out again here. */
26486 if (!S_IS_COMMON (fixp
->fx_addsy
))
26487 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
26488 code
= fixp
->fx_r_type
;
26492 case BFD_RELOC_ARM_IMMEDIATE
:
26493 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26494 _("internal relocation (type: IMMEDIATE) not fixed up"));
26497 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
26498 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26499 _("ADRL used for a symbol not defined in the same file"));
26502 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26503 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26504 case BFD_RELOC_ARM_THUMB_LOOP12
:
26505 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26506 _("%s used for a symbol not defined in the same file"),
26507 bfd_get_reloc_code_name (fixp
->fx_r_type
));
26510 case BFD_RELOC_ARM_OFFSET_IMM
:
26511 if (section
->use_rela_p
)
26513 code
= fixp
->fx_r_type
;
26517 if (fixp
->fx_addsy
!= NULL
26518 && !S_IS_DEFINED (fixp
->fx_addsy
)
26519 && S_IS_LOCAL (fixp
->fx_addsy
))
26521 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26522 _("undefined local label `%s'"),
26523 S_GET_NAME (fixp
->fx_addsy
));
26527 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26528 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26535 switch (fixp
->fx_r_type
)
26537 case BFD_RELOC_NONE
: type
= "NONE"; break;
26538 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
26539 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
26540 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
26541 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
26542 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
26543 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
26544 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
26545 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
26546 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
26547 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
26548 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
26549 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
26550 default: type
= _("<unknown>"); break;
26552 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26553 _("cannot represent %s relocation in this object file format"),
26560 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
26562 && fixp
->fx_addsy
== GOT_symbol
)
26564 code
= BFD_RELOC_ARM_GOTPC
;
26565 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
26569 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
26571 if (reloc
->howto
== NULL
)
26573 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26574 _("cannot represent %s relocation in this object file format"),
26575 bfd_get_reloc_code_name (code
));
26579 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
26580 vtable entry to be used in the relocation's section offset. */
26581 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26582 reloc
->address
= fixp
->fx_offset
;
26587 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26590 cons_fix_new_arm (fragS
* frag
,
26594 bfd_reloc_code_real_type reloc
)
26599 FIXME: @@ Should look at CPU word size. */
26603 reloc
= BFD_RELOC_8
;
26606 reloc
= BFD_RELOC_16
;
26610 reloc
= BFD_RELOC_32
;
26613 reloc
= BFD_RELOC_64
;
26618 if (exp
->X_op
== O_secrel
)
26620 exp
->X_op
= O_symbol
;
26621 reloc
= BFD_RELOC_32_SECREL
;
26625 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
26628 #if defined (OBJ_COFF)
26630 arm_validate_fix (fixS
* fixP
)
26632 /* If the destination of the branch is a defined symbol which does not have
26633 the THUMB_FUNC attribute, then we must be calling a function which has
26634 the (interfacearm) attribute. We look for the Thumb entry point to that
26635 function and change the branch to refer to that function instead. */
26636 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
26637 && fixP
->fx_addsy
!= NULL
26638 && S_IS_DEFINED (fixP
->fx_addsy
)
26639 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
26641 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
26648 arm_force_relocation (struct fix
* fixp
)
26650 #if defined (OBJ_COFF) && defined (TE_PE)
26651 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
26655 /* In case we have a call or a branch to a function in ARM ISA mode from
26656 a thumb function or vice-versa force the relocation. These relocations
26657 are cleared off for some cores that might have blx and simple transformations
26661 switch (fixp
->fx_r_type
)
26663 case BFD_RELOC_ARM_PCREL_JUMP
:
26664 case BFD_RELOC_ARM_PCREL_CALL
:
26665 case BFD_RELOC_THUMB_PCREL_BLX
:
26666 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
26670 case BFD_RELOC_ARM_PCREL_BLX
:
26671 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26672 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26673 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26674 if (ARM_IS_FUNC (fixp
->fx_addsy
))
26683 /* Resolve these relocations even if the symbol is extern or weak.
26684 Technically this is probably wrong due to symbol preemption.
26685 In practice these relocations do not have enough range to be useful
26686 at dynamic link time, and some code (e.g. in the Linux kernel)
26687 expects these references to be resolved. */
26688 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
26689 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
26690 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
26691 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
26692 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
26693 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
26694 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
26695 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
26696 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
26697 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
26698 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
26699 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
26700 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
26701 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
26704 /* Always leave these relocations for the linker. */
26705 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26706 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26707 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26710 /* Always generate relocations against function symbols. */
26711 if (fixp
->fx_r_type
== BFD_RELOC_32
26713 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
26716 return generic_force_reloc (fixp
);
26719 #if defined (OBJ_ELF) || defined (OBJ_COFF)
26720 /* Relocations against function names must be left unadjusted,
26721 so that the linker can use this information to generate interworking
26722 stubs. The MIPS version of this function
26723 also prevents relocations that are mips-16 specific, but I do not
26724 know why it does this.
26727 There is one other problem that ought to be addressed here, but
26728 which currently is not: Taking the address of a label (rather
26729 than a function) and then later jumping to that address. Such
26730 addresses also ought to have their bottom bit set (assuming that
26731 they reside in Thumb code), but at the moment they will not. */
26734 arm_fix_adjustable (fixS
* fixP
)
26736 if (fixP
->fx_addsy
== NULL
)
26739 /* Preserve relocations against symbols with function type. */
26740 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
26743 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
26744 && fixP
->fx_subsy
== NULL
)
26747 /* We need the symbol name for the VTABLE entries. */
26748 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
26749 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26752 /* Don't allow symbols to be discarded on GOT related relocs. */
26753 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
26754 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
26755 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
26756 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
26757 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
26758 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
26759 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
26760 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
26761 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
26762 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
26763 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
26764 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
26765 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
26766 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
26767 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
26768 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
26769 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
26772 /* Similarly for group relocations. */
26773 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26774 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26775 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26778 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
26779 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
26780 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
26781 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
26782 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
26783 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
26784 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
26785 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
26786 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
26789 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
26790 offsets, so keep these symbols. */
26791 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
26792 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
26797 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
26801 elf32_arm_target_format (void)
26804 return (target_big_endian
26805 ? "elf32-bigarm-symbian"
26806 : "elf32-littlearm-symbian");
26807 #elif defined (TE_VXWORKS)
26808 return (target_big_endian
26809 ? "elf32-bigarm-vxworks"
26810 : "elf32-littlearm-vxworks");
26811 #elif defined (TE_NACL)
26812 return (target_big_endian
26813 ? "elf32-bigarm-nacl"
26814 : "elf32-littlearm-nacl");
26818 if (target_big_endian
)
26819 return "elf32-bigarm-fdpic";
26821 return "elf32-littlearm-fdpic";
26825 if (target_big_endian
)
26826 return "elf32-bigarm";
26828 return "elf32-littlearm";
26834 armelf_frob_symbol (symbolS
* symp
,
26837 elf_frob_symbol (symp
, puntp
);
26841 /* MD interface: Finalization. */
26846 literal_pool
* pool
;
26848 /* Ensure that all the predication blocks are properly closed. */
26849 check_pred_blocks_finished ();
26851 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
26853 /* Put it at the end of the relevant section. */
26854 subseg_set (pool
->section
, pool
->sub_section
);
26856 arm_elf_change_section ();
26863 /* Remove any excess mapping symbols generated for alignment frags in
26864 SEC. We may have created a mapping symbol before a zero byte
26865 alignment; remove it if there's a mapping symbol after the
26868 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
26869 void *dummy ATTRIBUTE_UNUSED
)
26871 segment_info_type
*seginfo
= seg_info (sec
);
26874 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
26877 for (fragp
= seginfo
->frchainP
->frch_root
;
26879 fragp
= fragp
->fr_next
)
26881 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
26882 fragS
*next
= fragp
->fr_next
;
26884 /* Variable-sized frags have been converted to fixed size by
26885 this point. But if this was variable-sized to start with,
26886 there will be a fixed-size frag after it. So don't handle
26888 if (sym
== NULL
|| next
== NULL
)
26891 if (S_GET_VALUE (sym
) < next
->fr_address
)
26892 /* Not at the end of this frag. */
26894 know (S_GET_VALUE (sym
) == next
->fr_address
);
26898 if (next
->tc_frag_data
.first_map
!= NULL
)
26900 /* Next frag starts with a mapping symbol. Discard this
26902 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26906 if (next
->fr_next
== NULL
)
26908 /* This mapping symbol is at the end of the section. Discard
26910 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
26911 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26915 /* As long as we have empty frags without any mapping symbols,
26917 /* If the next frag is non-empty and does not start with a
26918 mapping symbol, then this mapping symbol is required. */
26919 if (next
->fr_address
!= next
->fr_next
->fr_address
)
26922 next
= next
->fr_next
;
26924 while (next
!= NULL
);
26929 /* Adjust the symbol table. This marks Thumb symbols as distinct from
26933 arm_adjust_symtab (void)
26938 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26940 if (ARM_IS_THUMB (sym
))
26942 if (THUMB_IS_FUNC (sym
))
26944 /* Mark the symbol as a Thumb function. */
26945 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
26946 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
26947 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
26949 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
26950 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
26952 as_bad (_("%s: unexpected function type: %d"),
26953 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
26955 else switch (S_GET_STORAGE_CLASS (sym
))
26958 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
26961 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
26964 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
26972 if (ARM_IS_INTERWORK (sym
))
26973 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
26980 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26982 if (ARM_IS_THUMB (sym
))
26984 elf_symbol_type
* elf_sym
;
26986 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
26987 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
26989 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
26990 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
26992 /* If it's a .thumb_func, declare it as so,
26993 otherwise tag label as .code 16. */
26994 if (THUMB_IS_FUNC (sym
))
26995 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
26996 ST_BRANCH_TO_THUMB
);
26997 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26998 elf_sym
->internal_elf_sym
.st_info
=
26999 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
27004 /* Remove any overlapping mapping symbols generated by alignment frags. */
27005 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
27006 /* Now do generic ELF adjustments. */
27007 elf_adjust_symtab ();
27011 /* MD interface: Initialization. */
27014 set_constant_flonums (void)
27018 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
27019 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
27023 /* Auto-select Thumb mode if it's the only available instruction set for the
27024 given architecture. */
27027 autoselect_thumb_from_cpu_variant (void)
27029 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
27030 opcode_select (16);
27039 if ( (arm_ops_hsh
= hash_new ()) == NULL
27040 || (arm_cond_hsh
= hash_new ()) == NULL
27041 || (arm_vcond_hsh
= hash_new ()) == NULL
27042 || (arm_shift_hsh
= hash_new ()) == NULL
27043 || (arm_psr_hsh
= hash_new ()) == NULL
27044 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
27045 || (arm_reg_hsh
= hash_new ()) == NULL
27046 || (arm_reloc_hsh
= hash_new ()) == NULL
27047 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
27048 as_fatal (_("virtual memory exhausted"));
27050 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
27051 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
27052 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
27053 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
27054 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
27055 hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, (void *) (vconds
+ i
));
27056 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
27057 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
27058 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
27059 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
27060 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
27061 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
27062 (void *) (v7m_psrs
+ i
));
27063 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
27064 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
27066 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
27068 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
27069 (void *) (barrier_opt_names
+ i
));
27071 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
27073 struct reloc_entry
* entry
= reloc_names
+ i
;
27075 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
27076 /* This makes encode_branch() use the EABI versions of this relocation. */
27077 entry
->reloc
= BFD_RELOC_UNUSED
;
27079 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
27083 set_constant_flonums ();
27085 /* Set the cpu variant based on the command-line options. We prefer
27086 -mcpu= over -march= if both are set (as for GCC); and we prefer
27087 -mfpu= over any other way of setting the floating point unit.
27088 Use of legacy options with new options are faulted. */
27091 if (mcpu_cpu_opt
|| march_cpu_opt
)
27092 as_bad (_("use of old and new-style options to set CPU type"));
27094 selected_arch
= *legacy_cpu
;
27096 else if (mcpu_cpu_opt
)
27098 selected_arch
= *mcpu_cpu_opt
;
27099 selected_ext
= *mcpu_ext_opt
;
27101 else if (march_cpu_opt
)
27103 selected_arch
= *march_cpu_opt
;
27104 selected_ext
= *march_ext_opt
;
27106 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
27111 as_bad (_("use of old and new-style options to set FPU type"));
27113 selected_fpu
= *legacy_fpu
;
27116 selected_fpu
= *mfpu_opt
;
27119 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
27120 || defined (TE_NetBSD) || defined (TE_VXWORKS))
27121 /* Some environments specify a default FPU. If they don't, infer it
27122 from the processor. */
27124 selected_fpu
= *mcpu_fpu_opt
;
27125 else if (march_fpu_opt
)
27126 selected_fpu
= *march_fpu_opt
;
27128 selected_fpu
= fpu_default
;
27132 if (ARM_FEATURE_ZERO (selected_fpu
))
27134 if (!no_cpu_selected ())
27135 selected_fpu
= fpu_default
;
27137 selected_fpu
= fpu_arch_fpa
;
27141 if (ARM_FEATURE_ZERO (selected_arch
))
27143 selected_arch
= cpu_default
;
27144 selected_cpu
= selected_arch
;
27146 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27148 /* Autodection of feature mode: allow all features in cpu_variant but leave
27149 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
27150 after all instruction have been processed and we can decide what CPU
27151 should be selected. */
27152 if (ARM_FEATURE_ZERO (selected_arch
))
27153 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
27155 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27158 autoselect_thumb_from_cpu_variant ();
27160 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
27162 #if defined OBJ_COFF || defined OBJ_ELF
27164 unsigned int flags
= 0;
27166 #if defined OBJ_ELF
27167 flags
= meabi_flags
;
27169 switch (meabi_flags
)
27171 case EF_ARM_EABI_UNKNOWN
:
27173 /* Set the flags in the private structure. */
27174 if (uses_apcs_26
) flags
|= F_APCS26
;
27175 if (support_interwork
) flags
|= F_INTERWORK
;
27176 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
27177 if (pic_code
) flags
|= F_PIC
;
27178 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
27179 flags
|= F_SOFT_FLOAT
;
27181 switch (mfloat_abi_opt
)
27183 case ARM_FLOAT_ABI_SOFT
:
27184 case ARM_FLOAT_ABI_SOFTFP
:
27185 flags
|= F_SOFT_FLOAT
;
27188 case ARM_FLOAT_ABI_HARD
:
27189 if (flags
& F_SOFT_FLOAT
)
27190 as_bad (_("hard-float conflicts with specified fpu"));
27194 /* Using pure-endian doubles (even if soft-float). */
27195 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
27196 flags
|= F_VFP_FLOAT
;
27198 #if defined OBJ_ELF
27199 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
27200 flags
|= EF_ARM_MAVERICK_FLOAT
;
27203 case EF_ARM_EABI_VER4
:
27204 case EF_ARM_EABI_VER5
:
27205 /* No additional flags to set. */
27212 bfd_set_private_flags (stdoutput
, flags
);
27214 /* We have run out flags in the COFF header to encode the
27215 status of ATPCS support, so instead we create a dummy,
27216 empty, debug section called .arm.atpcs. */
27221 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
27225 bfd_set_section_flags
27226 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
27227 bfd_set_section_size (stdoutput
, sec
, 0);
27228 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
27234 /* Record the CPU type as well. */
27235 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
27236 mach
= bfd_mach_arm_iWMMXt2
;
27237 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
27238 mach
= bfd_mach_arm_iWMMXt
;
27239 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
27240 mach
= bfd_mach_arm_XScale
;
27241 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
27242 mach
= bfd_mach_arm_ep9312
;
27243 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
27244 mach
= bfd_mach_arm_5TE
;
27245 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
27247 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27248 mach
= bfd_mach_arm_5T
;
27250 mach
= bfd_mach_arm_5
;
27252 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
27254 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27255 mach
= bfd_mach_arm_4T
;
27257 mach
= bfd_mach_arm_4
;
27259 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
27260 mach
= bfd_mach_arm_3M
;
27261 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
27262 mach
= bfd_mach_arm_3
;
27263 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
27264 mach
= bfd_mach_arm_2a
;
27265 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
27266 mach
= bfd_mach_arm_2
;
27268 mach
= bfd_mach_arm_unknown
;
27270 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
27273 /* Command line processing. */
27276 Invocation line includes a switch not recognized by the base assembler.
27277 See if it's a processor-specific option.
27279 This routine is somewhat complicated by the need for backwards
27280 compatibility (since older releases of gcc can't be changed).
27281 The new options try to make the interface as compatible as
27284 New options (supported) are:
27286 -mcpu=<cpu name> Assemble for selected processor
27287 -march=<architecture name> Assemble for selected architecture
27288 -mfpu=<fpu architecture> Assemble for selected FPU.
27289 -EB/-mbig-endian Big-endian
27290 -EL/-mlittle-endian Little-endian
27291 -k Generate PIC code
27292 -mthumb Start in Thumb mode
27293 -mthumb-interwork Code supports ARM/Thumb interworking
27295 -m[no-]warn-deprecated Warn about deprecated features
27296 -m[no-]warn-syms Warn when symbols match instructions
27298 For now we will also provide support for:
27300 -mapcs-32 32-bit Program counter
27301 -mapcs-26 26-bit Program counter
27302 -mapcs-float Floats passed in FP registers
27303 -mapcs-reentrant Reentrant code
27305 (sometime these will probably be replaced with -mapcs=<list of options>
27306 and -matpcs=<list of options>)
27308 The remaining options are only supported for back-wards compatibility.
27309 Cpu variants, the arm part is optional:
27310 -m[arm]1 Currently not supported.
27311 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27312 -m[arm]3 Arm 3 processor
27313 -m[arm]6[xx], Arm 6 processors
27314 -m[arm]7[xx][t][[d]m] Arm 7 processors
27315 -m[arm]8[10] Arm 8 processors
27316 -m[arm]9[20][tdmi] Arm 9 processors
27317 -mstrongarm[110[0]] StrongARM processors
27318 -mxscale XScale processors
27319 -m[arm]v[2345[t[e]]] Arm architectures
27320 -mall All (except the ARM1)
27322 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27323 -mfpe-old (No float load/store multiples)
27324 -mvfpxd VFP Single precision
27326 -mno-fpu Disable all floating point instructions
27328 The following CPU names are recognized:
27329 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27330 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27331 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
27332 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27333 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27334 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27335 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short (single-character) options recognized by this backend:
   -m<arg> (machine/feature selection, takes an argument) and -k (PIC).  */
const char * md_shortopts = "m:k";
27341 #ifdef ARM_BI_ENDIAN
27342 #define OPTION_EB (OPTION_MD_BASE + 0)
27343 #define OPTION_EL (OPTION_MD_BASE + 1)
27345 #if TARGET_BYTES_BIG_ENDIAN
27346 #define OPTION_EB (OPTION_MD_BASE + 0)
27348 #define OPTION_EL (OPTION_MD_BASE + 1)
27351 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
27352 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
27354 struct option md_longopts
[] =
27357 {"EB", no_argument
, NULL
, OPTION_EB
},
27360 {"EL", no_argument
, NULL
, OPTION_EL
},
27362 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
27364 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
27366 {NULL
, no_argument
, NULL
, 0}
/* Size in bytes of the md_longopts table, as required by the generic
   option parser (gas/as.c).  */
size_t md_longopts_size = sizeof (md_longopts);
/* Describes a simple command-line option that sets an integer variable
   to a fixed value when the option is seen.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.  */
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27380 struct arm_option_table arm_opts
[] =
27382 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
27383 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
27384 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27385 &support_interwork
, 1, NULL
},
27386 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
27387 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
27388 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
27390 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
27391 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
27392 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
27393 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
27396 /* These are recognized by the assembler, but have no affect on code. */
27397 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
27398 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
27400 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
27401 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27402 &warn_on_deprecated
, 0, NULL
},
27403 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
27404 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
27405 {NULL
, NULL
, NULL
, 0, NULL
}
/* Describes a deprecated (legacy) command-line option that sets an ARM
   feature-set variable; `deprecated' names the modern replacement.  */
struct arm_legacy_option_table
{
  const char *		  option;	/* Option name to match.  */
  const arm_feature_set	** var;		/* Variable to change.  */
  const arm_feature_set	  value;	/* What to change it to.  */
  const char *		  deprecated;	/* If non-null, print this message.  */
};
27416 const struct arm_legacy_option_table arm_legacy_opts
[] =
27418 /* DON'T add any new processors to this list -- we want the whole list
27419 to go away... Add them to the processors table instead. */
27420 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27421 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27422 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27423 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27424 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27425 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27426 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27427 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27428 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27429 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27430 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27431 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27432 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27433 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27434 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27435 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27436 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27437 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27438 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27439 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27440 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27441 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27442 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27443 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27444 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27445 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27446 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27447 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27448 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27449 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27450 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27451 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27452 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27453 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27454 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27455 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27456 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27457 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27458 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27459 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27460 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27461 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27462 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27463 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27464 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27465 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27466 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27467 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27468 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27469 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27470 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27471 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27472 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27473 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27474 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27475 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27476 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27477 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27478 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27479 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27480 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27481 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27482 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27483 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27484 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27485 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27486 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27487 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27488 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
27489 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
27490 N_("use -mcpu=strongarm110")},
27491 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
27492 N_("use -mcpu=strongarm1100")},
27493 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
27494 N_("use -mcpu=strongarm1110")},
27495 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
27496 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
27497 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
27499 /* Architecture variants -- don't add any more to this list either. */
27500 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27501 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27502 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27503 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27504 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27505 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27506 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27507 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27508 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27509 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27510 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27511 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27512 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27513 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27514 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27515 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27516 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27517 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27519 /* Floating point variants -- don't add any more to this list either. */
27520 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
27521 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
27522 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
27523 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
27524 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27526 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
27529 struct arm_cpu_option_table
27533 const arm_feature_set value
;
27534 const arm_feature_set ext
;
27535 /* For some CPUs we assume an FPU unless the user explicitly sets
27537 const arm_feature_set default_fpu
;
27538 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27540 const char * canonical_name
;
27543 /* This list should, at a minimum, contain all the cpu names
27544 recognized by GCC. */
27545 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27547 static const struct arm_cpu_option_table arm_cpus
[] =
27549 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
27552 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
27555 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
27558 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
27561 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
27564 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
27567 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
27570 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
27573 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
27576 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
27579 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
27582 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
27585 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
27588 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
27591 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
27594 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
27597 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
27600 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
27603 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
27606 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
27609 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
27612 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
27615 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
27618 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
27621 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
27624 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
27627 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
27630 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
27633 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
27636 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
27639 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
27642 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
27645 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
27648 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
27651 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
27654 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
27657 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
27660 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
27663 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
27666 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
27669 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
27672 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
27675 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
27678 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
27681 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
27684 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
27688 /* For V5 or later processors we default to using VFP; but the user
27689 should really set the FPU type explicitly. */
27690 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
27693 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
27696 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27699 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27702 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
27705 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
27708 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
27711 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
27714 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
27717 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
27720 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
27723 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
27726 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
27729 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
27732 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
27735 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
27738 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
27741 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
27744 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
27747 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
27750 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
27753 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
27756 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
27759 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
27762 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
27765 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
27768 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
27771 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
27774 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
27777 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
27780 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
27783 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
27786 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
27789 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
27792 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
27795 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
27798 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
27799 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27801 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
27803 FPU_ARCH_NEON_VFP_V4
),
27804 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
27805 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27806 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27807 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
27808 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27809 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27810 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
27812 FPU_ARCH_NEON_VFP_V4
),
27813 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
27815 FPU_ARCH_NEON_VFP_V4
),
27816 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
27818 FPU_ARCH_NEON_VFP_V4
),
27819 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
27820 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27821 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27822 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
27823 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27824 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27825 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
27826 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27827 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27828 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
27829 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27830 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27831 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
27832 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27833 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27834 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
27835 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27836 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27837 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
27838 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27839 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27840 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
27841 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27842 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27843 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
27844 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27845 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27846 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
27847 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27848 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27849 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
27852 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
27854 FPU_ARCH_VFP_V3D16
),
27855 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
27856 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27858 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
27859 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27860 FPU_ARCH_VFP_V3D16
),
27861 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
27862 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27863 FPU_ARCH_VFP_V3D16
),
27864 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
27865 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27866 FPU_ARCH_NEON_VFP_ARMV8
),
27867 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
27868 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27870 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
27873 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
27876 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
27879 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
27882 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
27885 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
27888 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
27891 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
27892 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27893 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27894 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
27895 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27896 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27897 /* ??? XSCALE is really an architecture. */
27898 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
27902 /* ??? iwmmxt is not a processor. */
27903 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
27906 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
27909 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
27914 ARM_CPU_OPT ("ep9312", "ARM920T",
27915 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
27916 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
27918 /* Marvell processors. */
27919 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
27920 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27921 FPU_ARCH_VFP_V3D16
),
27922 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
27923 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27924 FPU_ARCH_NEON_VFP_V4
),
27926 /* APM X-Gene family. */
27927 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
27929 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27930 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
27931 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27932 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27934 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27938 struct arm_ext_table
27942 const arm_feature_set merge
;
27943 const arm_feature_set clear
;
27946 struct arm_arch_option_table
27950 const arm_feature_set value
;
27951 const arm_feature_set default_fpu
;
27952 const struct arm_ext_table
* ext_table
;
27955 /* Used to add support for +E and +noE extension. */
27956 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
27957 /* Used to add support for a +E extension. */
27958 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
27959 /* Used to add support for a +noE extension. */
27960 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
27962 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
27963 ~0 & ~FPU_ENDIAN_PURE)
/* Architecture extensions usable with -march=armv5te and derivatives.
   ARM_EXT entries accept both +<ext> and +no<ext> (see macro comments
   above); the all-zero entry terminates the table.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
/* Architecture extensions usable with -march=armv7.  +fp selects VFPv3-D16;
   +nofp removes all FP features (ALL_FP).  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
/* Architecture extensions usable with -march=armv7ve.  ARM_EXT entries
   support +<ext>/+no<ext>; ARM_ADD entries only +<ext>.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),

  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  /* +nosimd clears both plain NEON and the FMA extension.  */
  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
/* Architecture extensions usable with -march=armv7-a.  Note +fp maps to
   VFPv3-D16 here (vs. VFPv4-D16 on armv7ve).  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  /* +nosimd clears both plain NEON and the FMA extension.  */
  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  /* Core (non-FPU) extensions: multiprocessing and security.  */
  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
/* +<ext> extensions selectable after -march=armv7-r / armv7r.  Offers a
   single-precision-only FP option (+fp.sp) and hardware divide (+idiv).
   NULL-name entry terminates.  */
28025 static const struct arm_ext_table armv7r_ext_table
[] =
28027 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
28028 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
28029 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28030 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
28031 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
28032 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28033 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28034 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
28035 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv7e-m.  All FP options are
   M-profile (SP-D16 / D16) variants.  NULL-name entry terminates.  */
28038 static const struct arm_ext_table armv7em_ext_table
[] =
28040 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
28041 /* Alias for +fp, used to be known as fpv4-sp-d16. */
28042 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
28043 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
28044 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28045 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
28046 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8-a.  Note the ARM_REMOVE
   entry: plain +fp is disallowed because Armv8-A mandates SIMD with FP
   (use +simd instead).  NULL-name entry terminates.  */
28049 static const struct arm_ext_table armv8a_ext_table
[] =
28051 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28052 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28053 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28054 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28056 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28057 should use the +simd option to turn on FP. */
28058 ARM_REMOVE ("fp", ALL_FP
),
28059 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28060 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28061 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8.1-a.  Same shape as the
   armv8a table but with the v8.1 SIMD (RDMA-capable) feature sets.  */
28065 static const struct arm_ext_table armv81a_ext_table
[] =
28067 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28068 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28069 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28071 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28072 should use the +simd option to turn on FP. */
28073 ARM_REMOVE ("fp", ALL_FP
),
28074 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28075 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28076 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8.2-a (also reused for
   armv8.3-a in arm_archs below).  Adds fp16, fp16fml and dotprod.  */
28079 static const struct arm_ext_table armv82a_ext_table
[] =
28081 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28082 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
28083 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
28084 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28085 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28086 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28088 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28089 should use the +simd option to turn on FP. */
28090 ARM_REMOVE ("fp", ALL_FP
),
28091 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28092 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28093 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8.4-a.  SIMD baseline
   includes dot-product; fp16 implies fp16fml at this arch level.  */
28096 static const struct arm_ext_table armv84a_ext_table
[] =
28098 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28099 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28100 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28101 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28103 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28104 should use the +simd option to turn on FP. */
28105 ARM_REMOVE ("fp", ALL_FP
),
28106 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28107 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28108 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8.5-a.  No sb/predres
   entries here: those features are part of the v8.5 base architecture.  */
28111 static const struct arm_ext_table armv85a_ext_table
[] =
28113 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28114 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28115 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28116 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28118 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28119 should use the +simd option to turn on FP. */
28120 ARM_REMOVE ("fp", ALL_FP
),
28121 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8-m.main: DSP, SP-only FP
   (+fp) and double-precision FP (+fp.dp).  NULL-name entry terminates.  */
28124 static const struct arm_ext_table armv8m_main_ext_table
[] =
28126 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28127 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28128 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
28129 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28130 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8.1-m.main, including the
   MVE (Helium) vector extension.  NOTE(review): extraction looks lossy in
   this table — several ARM_EXT/ARM_ADD wrapper lines around the bare
   ARM_FEATURE(...) feature sets appear to be missing (presumably the "fp",
   "fp.dp" and "mve.fp" entries); verify against upstream tc-arm.c.  */
28133 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
28135 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28136 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28138 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28139 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
28142 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28143 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28144 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
28145 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
28147 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28148 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
28149 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28150 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* +<ext> extensions selectable after -march=armv8-r.  Like Armv8-A, plain
   +fp is removed; only single-precision +fp.sp is offered separately.  */
28153 static const struct arm_ext_table armv8r_ext_table
[] =
28155 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28156 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28157 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28158 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28159 ARM_REMOVE ("fp", ALL_FP
),
28160 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
28161 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* -march=<name> option table.  ARM_ARCH_OPT entries have no context-
   sensitive extension table; ARM_ARCH_OPT2 entries link the arch to its
   <arch>_ext_table defined above (the macro pastes "_ext_table" onto its
   last argument).  NOTE(review): extraction looks lossy — the ext-table
   arguments of the armv8-m.main and armv8.1-m.main entries appear to be
   missing; verify against upstream tc-arm.c.  */
28164 /* This list should, at a minimum, contain all the architecture names
28165 recognized by GCC. */
28166 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
28167 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28168 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28170 static const struct arm_arch_option_table arm_archs
[] =
28172 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
28173 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
28174 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
28175 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28176 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28177 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
28178 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
28179 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
28180 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
28181 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
28182 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
28183 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
28184 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
28185 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
28186 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
28187 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
28188 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
28189 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28190 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28191 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
28192 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
28193 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28194 kept to preserve existing behaviour. */
28195 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28196 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28197 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
28198 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
28199 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
28200 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28201 kept to preserve existing behaviour. */
28202 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28203 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28204 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
28205 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
28206 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
28207 /* The official spelling of the ARMv7 profile variants is the dashed form.
28208 Accept the non-dashed form for compatibility with old toolchains. */
28209 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28210 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
28211 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28212 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28213 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28214 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28215 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28216 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
28217 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
28218 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
28220 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
28222 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
28223 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
28224 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
28225 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
28226 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
28227 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
28228 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
28229 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
28230 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
28231 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
28232 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28234 #undef ARM_ARCH_OPT
/* Legacy (context-insensitive) extension table used by -mcpu/-march +<ext>
   parsing when no per-arch ext_table applies, and by build-attribute
   selection.  Each entry carries a merge set, a clear set (for no<ext>),
   and up to two architectures the extension is valid for (ARM_ARCH_NONE =
   any arch, ARM_ANY = unused slot).  Must stay alphabetically sorted; the
   duplicate "idiv" entry is intentional (see its comment).
   NOTE(review): extraction looks lossy — the allowed-arch arguments of the
   dotprod, fp16, fp16fml, predres and sb entries, and part of the virt
   merge set, appear to be missing; verify against upstream tc-arm.c.  */
28236 /* ISA extensions in the co-processor and main instruction set space. */
28238 struct arm_option_extension_value_table
28242 const arm_feature_set merge_value
;
28243 const arm_feature_set clear_value
;
28244 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28245 indicates that an extension is available for all architectures while
28246 ARM_ANY marks an empty entry. */
28247 const arm_feature_set allowed_archs
[2];
28250 /* The following table must be in alphabetical order with a NULL last entry. */
28252 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28253 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28255 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28256 use the context sensitive approach using arm_ext_table's. */
28257 static const struct arm_option_extension_value_table arm_extensions
[] =
28259 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28260 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28261 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28262 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
28263 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28264 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
28265 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
28267 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28268 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28269 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
28270 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
28271 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28272 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28275 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28276 | ARM_EXT2_FP16_FML
),
28277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28278 | ARM_EXT2_FP16_FML
),
28280 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28281 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28282 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28283 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28284 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28285 Thumb divide instruction. Due to this having the same name as the
28286 previous entry, this will be ignored when doing command-line parsing and
28287 only considered by build attribute selection code. */
28288 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28289 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28290 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
28291 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
28292 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
28293 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
28294 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
28295 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
28296 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
28297 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28298 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28299 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28300 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28301 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28302 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28303 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
28304 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
28305 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
28306 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28307 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28308 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28310 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
28311 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
28312 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28313 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
28314 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
28315 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28316 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28317 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28319 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28320 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28321 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
28322 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28323 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
28324 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
28325 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28326 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
28328 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
28329 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28330 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
28331 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
28332 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
/* -mfpu=<name> option table mapping FPU names (including several legacy
   and undocumented aliases) to their feature sets.  Looked up by streq in
   arm_parse_fpu below; NULL-name entry terminates.  */
28336 /* ISA floating-point and Advanced SIMD extensions. */
28337 struct arm_option_fpu_value_table
28340 const arm_feature_set value
;
28343 /* This list should, at a minimum, contain all the fpu names
28344 recognized by GCC. */
28345 static const struct arm_option_fpu_value_table arm_fpus
[] =
28347 {"softfpa", FPU_NONE
},
28348 {"fpe", FPU_ARCH_FPE
},
28349 {"fpe2", FPU_ARCH_FPE
},
28350 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
28351 {"fpa", FPU_ARCH_FPA
},
28352 {"fpa10", FPU_ARCH_FPA
},
28353 {"fpa11", FPU_ARCH_FPA
},
28354 {"arm7500fe", FPU_ARCH_FPA
},
28355 {"softvfp", FPU_ARCH_VFP
},
28356 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
28357 {"vfp", FPU_ARCH_VFP_V2
},
28358 {"vfp9", FPU_ARCH_VFP_V2
},
28359 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
28360 {"vfp10", FPU_ARCH_VFP_V2
},
28361 {"vfp10-r0", FPU_ARCH_VFP_V1
},
28362 {"vfpxd", FPU_ARCH_VFP_V1xD
},
28363 {"vfpv2", FPU_ARCH_VFP_V2
},
28364 {"vfpv3", FPU_ARCH_VFP_V3
},
28365 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
28366 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
28367 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
28368 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
28369 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
28370 {"arm1020t", FPU_ARCH_VFP_V1
},
28371 {"arm1020e", FPU_ARCH_VFP_V2
},
28372 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
28373 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
28374 {"maverick", FPU_ARCH_MAVERICK
},
28375 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28376 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28377 {"neon-fp16", FPU_ARCH_NEON_FP16
},
28378 {"vfpv4", FPU_ARCH_VFP_V4
},
28379 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
28380 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
28381 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
28382 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
28383 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
28384 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
28385 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
28386 {"crypto-neon-fp-armv8",
28387 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
28388 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
28389 {"crypto-neon-fp-armv8.1",
28390 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
28391 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer-value option table, plus the -mfloat-abi=<abi>
   table built from it (hard / softfp / soft).  */
28394 struct arm_option_value_table
28400 static const struct arm_option_value_table arm_float_abis
[] =
28402 {"hard", ARM_FLOAT_ABI_HARD
},
28403 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
28404 {"soft", ARM_FLOAT_ABI_SOFT
},
/* -meabi=<ver> table: GNU flavour plus EABI versions 4 and 5 (AAELF).  */
28409 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
28410 static const struct arm_option_value_table arm_eabis
[] =
28412 {"gnu", EF_ARM_EABI_UNKNOWN
},
28413 {"4", EF_ARM_EABI_VER4
},
28414 {"5", EF_ARM_EABI_VER5
},
/* Descriptor for long options of the form -m<option>=<subopt>: the option
   prefix to match, help text, a sub-option parser callback, and an
   optional deprecation message.  Used by arm_long_opts below.  */
28419 struct arm_long_option_table
28421 const char * option
; /* Substring to match. */
28422 const char * help
; /* Help information. */
28423 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
28424 const char * deprecated
; /* If non-null, print this message. */
/* Parse a '+'-separated list of architectural extensions STR against the
   base feature set *OPT_SET, updating *EXT_SET with features to add or
   (for "no<ext>") remove.  If EXT_TABLE is non-NULL it is the per-arch
   context-sensitive table and is searched first; the legacy global
   arm_extensions table is used otherwise.  Extensions must be given in
   alphabetical order, with all additions before any removals.
   NOTE(review): extraction is heavily lossy in this function — the return
   type, braces and many statements (loop bodies, returns, the "no" prefix
   stripping) are missing from this view; treat the text below as fragments
   and verify against upstream tc-arm.c before editing.  */
28428 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
28429 arm_feature_set
*ext_set
,
28430 const struct arm_ext_table
*ext_table
)
28432 /* We insist on extensions being specified in alphabetical order, and with
28433 extensions being added before being removed. We achieve this by having
28434 the global ARM_EXTENSIONS table in alphabetical order, and using the
28435 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28436 or removing it (0) and only allowing it to change in the order
28438 const struct arm_option_extension_value_table
* opt
= NULL
;
28439 const arm_feature_set arm_any
= ARM_ANY
;
28440 int adding_value
= -1;
28442 while (str
!= NULL
&& *str
!= 0)
28449 as_bad (_("invalid architectural extension"));
28454 ext
= strchr (str
, '+');
28459 len
= strlen (str
);
28461 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
28463 if (adding_value
!= 0)
28466 opt
= arm_extensions
;
28474 if (adding_value
== -1)
28477 opt
= arm_extensions
;
28479 else if (adding_value
!= 1)
28481 as_bad (_("must specify extensions to add before specifying "
28482 "those to remove"));
28489 as_bad (_("missing architectural extension"));
28493 gas_assert (adding_value
!= -1);
28494 gas_assert (opt
!= NULL
);
28496 if (ext_table
!= NULL
)
28498 const struct arm_ext_table
* ext_opt
= ext_table
;
28499 bfd_boolean found
= FALSE
;
28500 for (; ext_opt
->name
!= NULL
; ext_opt
++)
28501 if (ext_opt
->name_len
== len
28502 && strncmp (ext_opt
->name
, str
, len
) == 0)
28506 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
28507 /* TODO: Option not supported. When we remove the
28508 legacy table this case should error out. */
28511 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
28515 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
28516 /* TODO: Option not supported. When we remove the
28517 legacy table this case should error out. */
28519 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
28531 /* Scan over the options table trying to find an exact match. */
28532 for (; opt
->name
!= NULL
; opt
++)
28533 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28535 int i
, nb_allowed_archs
=
28536 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28537 /* Check we can apply the extension to this architecture. */
28538 for (i
= 0; i
< nb_allowed_archs
; i
++)
28541 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
28543 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
28546 if (i
== nb_allowed_archs
)
28548 as_bad (_("extension does not apply to the base architecture"));
28552 /* Add or remove the extension. */
28554 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
28556 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
28558 /* Allowing Thumb division instructions for ARMv7 in autodetection
28559 rely on this break so that duplicate extensions (extensions
28560 with the same name as a previous extension in the list) are not
28561 considered for command-line parsing. */
28565 if (opt
->name
== NULL
)
28567 /* Did we fail to find an extension because it wasn't specified in
28568 alphabetical order, or because it does not exist? */
28570 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28571 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28574 if (opt
->name
== NULL
)
28575 as_bad (_("unknown architectural extension `%s'"), str
);
28577 as_bad (_("architectural extensions must be specified in "
28578 "alphabetical order"));
28584 /* We should skip the extension we've just matched the next time
/* Handle -mcpu=<name>[+ext...]: look STR up in arm_cpus, set the global
   mcpu_cpu_opt / mcpu_ext_opt / mcpu_fpu_opt selections and the (upper-
   cased, truncated) selected_cpu_name, then hand any "+ext" tail to
   arm_parse_extension.  Reports as_bad on an unknown CPU name.
   NOTE(review): extraction is lossy — return type, braces and some
   statements are missing from this view; verify against upstream.  */
28596 arm_parse_cpu (const char *str
)
28598 const struct arm_cpu_option_table
*opt
;
28599 const char *ext
= strchr (str
, '+');
28605 len
= strlen (str
);
28609 as_bad (_("missing cpu name `%s'"), str
);
28613 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
28614 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28616 mcpu_cpu_opt
= &opt
->value
;
28617 if (mcpu_ext_opt
== NULL
)
28618 mcpu_ext_opt
= XNEW (arm_feature_set
);
28619 *mcpu_ext_opt
= opt
->ext
;
28620 mcpu_fpu_opt
= &opt
->default_fpu
;
28621 if (opt
->canonical_name
)
28623 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
28624 strcpy (selected_cpu_name
, opt
->canonical_name
);
28630 if (len
>= sizeof selected_cpu_name
)
28631 len
= (sizeof selected_cpu_name
) - 1;
28633 for (i
= 0; i
< len
; i
++)
28634 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28635 selected_cpu_name
[i
] = 0;
28639 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
28644 as_bad (_("unknown cpu `%s'"), str
);
/* Handle -march=<name>[+ext...]: look STR up in arm_archs, set the global
   march_cpu_opt / march_ext_opt / march_fpu_opt selections, then hand any
   "+ext" tail (with the arch's context-sensitive ext table) to
   arm_parse_extension.  Reports as_bad on an unknown architecture.
   NOTE(review): extraction is lossy — return type, braces and part of the
   arm_parse_extension call are missing from this view.  */
28649 arm_parse_arch (const char *str
)
28651 const struct arm_arch_option_table
*opt
;
28652 const char *ext
= strchr (str
, '+');
28658 len
= strlen (str
);
28662 as_bad (_("missing architecture name `%s'"), str
);
28666 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
28667 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28669 march_cpu_opt
= &opt
->value
;
28670 if (march_ext_opt
== NULL
)
28671 march_ext_opt
= XNEW (arm_feature_set
);
28672 *march_ext_opt
= arm_arch_none
;
28673 march_fpu_opt
= &opt
->default_fpu
;
28674 strcpy (selected_cpu_name
, opt
->name
);
28677 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
28683 as_bad (_("unknown architecture `%s'\n"), str
);
/* Handle -mfpu=<name>: linear search of arm_fpus; on a match record the
   FPU feature set in mfpu_opt, otherwise report as_bad.
   NOTE(review): extraction is lossy — return type, braces and return
   statements are missing from this view.  */
28688 arm_parse_fpu (const char * str
)
28690 const struct arm_option_fpu_value_table
* opt
;
28692 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28693 if (streq (opt
->name
, str
))
28695 mfpu_opt
= &opt
->value
;
28699 as_bad (_("unknown floating point format `%s'\n"), str
);
/* Handle -mfloat-abi=<abi>: linear search of arm_float_abis; on a match
   record the ABI value in mfloat_abi_opt, otherwise report as_bad.
   NOTE(review): return type/braces/returns missing from this view.  */
28704 arm_parse_float_abi (const char * str
)
28706 const struct arm_option_value_table
* opt
;
28708 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
28709 if (streq (opt
->name
, str
))
28711 mfloat_abi_opt
= opt
->value
;
28715 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* Handle -meabi=<ver>: linear search of arm_eabis; on a match record the
   EABI flags in meabi_flags, otherwise report as_bad.
   NOTE(review): return type/braces/returns missing from this view.  */
28721 arm_parse_eabi (const char * str
)
28723 const struct arm_option_value_table
*opt
;
28725 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
28726 if (streq (opt
->name
, str
))
28728 meabi_flags
= opt
->value
;
28731 as_bad (_("unknown EABI `%s'\n"), str
);
/* Handle -mimplicit-it=<mode>: map "arm" / "thumb" / "always" / "never"
   onto the corresponding IMPLICIT_IT_MODE_* value in implicit_it_mode;
   any other string reports as_bad.
   NOTE(review): return type/braces/returns missing from this view.  */
28737 arm_parse_it_mode (const char * str
)
28739 bfd_boolean ret
= TRUE
;
28741 if (streq ("arm", str
))
28742 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
28743 else if (streq ("thumb", str
))
28744 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
28745 else if (streq ("always", str
))
28746 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
28747 else if (streq ("never", str
))
28748 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
28751 as_bad (_("unknown implicit IT mode `%s', should be "\
28752 "arm, thumb, always, or never."), str
);
/* Handle -mccs: enable TI CodeComposer Studio syntax compatibility by
   setting codecomposer_syntax and switching the comment character to ';'
   (and clearing the line-separator set).  */
28760 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
28762 codecomposer_syntax
= TRUE
;
28763 arm_comment_chars
[0] = ';';
28764 arm_line_separator_chars
[0] = 0;
/* Table of -m<opt>= long options: prefix, help string, sub-option parser
   callback, deprecation message.  Consumed by md_parse_option and
   md_show_usage below; NULL entry terminates.  */
28768 struct arm_long_option_table arm_long_opts
[] =
28770 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
28771 arm_parse_cpu
, NULL
},
28772 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
28773 arm_parse_arch
, NULL
},
28774 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
28775 arm_parse_fpu
, NULL
},
28776 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
28777 arm_parse_float_abi
, NULL
},
28779 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
28780 arm_parse_eabi
, NULL
},
28782 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
28783 arm_parse_it_mode
, NULL
},
28784 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
28785 arm_ccs_mode
, NULL
},
28786 {NULL
, NULL
, 0, NULL
}
/* GAS hook: parse one command-line option (character C, optional ARG).
   Tries, in order: hard-coded cases (endianness, --fix-v4bx, listing
   options), the short arm_opts table, the legacy arm_legacy_opts table,
   and finally the arm_long_opts prefix table whose entries dispatch to a
   sub-option parser.  Deprecated options emit as_tsktsk warnings.
   NOTE(review): extraction is lossy — the switch statement, braces and
   return statements are missing from this view; verify upstream.  */
28790 md_parse_option (int c
, const char * arg
)
28792 struct arm_option_table
*opt
;
28793 const struct arm_legacy_option_table
*fopt
;
28794 struct arm_long_option_table
*lopt
;
28800 target_big_endian
= 1;
28806 target_big_endian
= 0;
28810 case OPTION_FIX_V4BX
:
28818 #endif /* OBJ_ELF */
28821 /* Listing option. Just ignore these, we don't support additional
28826 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28828 if (c
== opt
->option
[0]
28829 && ((arg
== NULL
&& opt
->option
[1] == 0)
28830 || streq (arg
, opt
->option
+ 1)))
28832 /* If the option is deprecated, tell the user. */
28833 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
28834 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28835 arg
? arg
: "", _(opt
->deprecated
));
28837 if (opt
->var
!= NULL
)
28838 *opt
->var
= opt
->value
;
28844 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
28846 if (c
== fopt
->option
[0]
28847 && ((arg
== NULL
&& fopt
->option
[1] == 0)
28848 || streq (arg
, fopt
->option
+ 1)))
28850 /* If the option is deprecated, tell the user. */
28851 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
28852 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28853 arg
? arg
: "", _(fopt
->deprecated
));
28855 if (fopt
->var
!= NULL
)
28856 *fopt
->var
= &fopt
->value
;
28862 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28864 /* These options are expected to have an argument. */
28865 if (c
== lopt
->option
[0]
28867 && strncmp (arg
, lopt
->option
+ 1,
28868 strlen (lopt
->option
+ 1)) == 0)
28870 /* If the option is deprecated, tell the user. */
28871 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
28872 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
28873 _(lopt
->deprecated
))
;
28875 /* Call the sup-option parser. */
28876 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
/* GAS hook: print ARM-specific --help text to FP — every entry of the
   short arm_opts table and long arm_long_opts table that has help text,
   followed by fixed lines for -EB/-EL/--fix-v4bx/--fdpic.
   NOTE(review): extraction is lossy — the fprintf wrappers around the
   trailing literal strings are missing from this view.  */
28887 md_show_usage (FILE * fp
)
28889 struct arm_option_table
*opt
;
28890 struct arm_long_option_table
*lopt
;
28892 fprintf (fp
, _(" ARM-specific assembler options:\n"));
28894 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28895 if (opt
->help
!= NULL
)
28896 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
28898 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28899 if (lopt
->help
!= NULL
)
28900 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28904 -EB assemble code for a big-endian cpu\n"));
28909 -EL assemble code for a little-endian cpu\n"));
28913 --fix-v4bx Allow BX in ARMv4 code\n"));
28917 --fdpic generate an FDPIC object file\n"));
28918 #endif /* OBJ_ELF */
/* Map CPU feature sets to EABI Tag_CPU_arch build-attribute values.
   Entries are chronological by architecture (ARMv6-M/ARMv6S-M are
   deliberately hoisted before ARMv7, see the comment in the table);
   a val of -1 terminates.  Consumed by get_aeabi_cpu_arch_from_fset.  */
28926 arm_feature_set flags
;
28927 } cpu_arch_ver_table
;
28929 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28930 chronologically for architectures, with an exception for ARMv6-M and
28931 ARMv6S-M due to legacy reasons. No new architecture should have a
28932 special case. This allows for build attribute selection results to be
28933 stable when new architectures are added. */
28934 static const cpu_arch_ver_table cpu_arch_ver
[] =
28936 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
28937 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
28938 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
28939 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
28940 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
28941 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
28942 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
28943 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
28944 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
28945 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
28946 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
28947 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
28948 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
28949 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
28950 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
28951 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
28952 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
28953 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
28954 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
28955 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
28956 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
28957 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
28958 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
28959 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
28961 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
28962 always selected build attributes to match those of ARMv6-M
28963 (resp. ARMv6S-M). However, due to these architectures being a strict
28964 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
28965 would be selected when fully respecting chronology of architectures.
28966 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
28967 move them before ARMv7 architectures. */
28968 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
28969 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
28971 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
28972 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
28973 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
28974 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
28975 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
28976 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
28977 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
28978 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
28979 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
28980 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
28981 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
28982 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
28983 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
28984 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
28985 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
28986 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
28987 {-1, ARM_ARCH_NONE
}
/* Record integer EABI build attribute TAG = VALUE via BFD, unless the user
   already set that tag explicitly (tracked in attributes_set_explicitly;
   tags past NUM_KNOWN_OBJ_ATTRIBUTES are always written).  */
28990 /* Set an attribute if it has not already been set by the user. */
28993 aeabi_set_attribute_int (int tag
, int value
)
28996 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28997 || !attributes_set_explicitly
[tag
])
28998 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
/* String flavour of aeabi_set_attribute_int: record TAG = VALUE unless the
   user set the tag explicitly.  */
29002 aeabi_set_attribute_string (int tag
, const char *value
)
29005 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
29006 || !attributes_set_explicitly
[tag
])
29007 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
/* Accumulate, into a scratch feature set, the merge_value of every entry
   in arm_extensions that (a) provides some feature in *NEEDED and (b) is
   allowed for the architecture *ARCH_FSET; return whether the accumulated
   set covers all of *NEEDED.
   NOTE(review): extraction is lossy — return type, braces and the
   assignment to nb_allowed_archs are partly missing from this view.  */
29010 /* Return whether features in the *NEEDED feature set are available via
29011 extensions for the architecture whose feature set is *ARCH_FSET. */
29014 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
29015 const arm_feature_set
*needed
)
29017 int i
, nb_allowed_archs
;
29018 arm_feature_set ext_fset
;
29019 const struct arm_option_extension_value_table
*opt
;
29021 ext_fset
= arm_arch_none
;
29022 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29024 /* Extension does not provide any feature we need. */
29025 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
29029 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
29030 for (i
= 0; i
< nb_allowed_archs
; i
++)
29033 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
29036 /* Extension is available, add it. */
29037 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
29038 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
29042 /* Can we enable all features in *needed? */
29043 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
29046 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
29047 a given architecture feature set *ARCH_EXT_FSET including extension feature
29048 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
29049 - if true, check for an exact match of the architecture modulo extensions;
29050 - otherwise, select build attribute value of the first superset
29051 architecture released so that results remains stable when new architectures
29053 For -march/-mcpu=all the build attribute value of the most featureful
29054 architecture is returned. Tag_CPU_arch_profile result is returned in
29058 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
29059 const arm_feature_set
*ext_fset
,
29060 char *profile
, int exact_match
)
29062 arm_feature_set arch_fset
;
29063 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
29065 /* Select most featureful architecture with all its extensions if building
29066 for -march=all as the feature sets used to set build attributes. */
29067 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
29069 /* Force revisiting of decision for each new architecture. */
29070 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29072 return TAG_CPU_ARCH_V8
;
29075 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
29077 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
29079 arm_feature_set known_arch_fset
;
29081 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
29084 /* Base architecture match user-specified architecture and
29085 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
29086 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
29091 /* Base architecture match user-specified architecture only
29092 (eg. ARMv6-M in the same case as above). Record it in case we
29093 find a match with above condition. */
29094 else if (p_ver_ret
== NULL
29095 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
29101 /* Architecture has all features wanted. */
29102 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
29104 arm_feature_set added_fset
;
29106 /* Compute features added by this architecture over the one
29107 recorded in p_ver_ret. */
29108 if (p_ver_ret
!= NULL
)
29109 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
29111 /* First architecture that match incl. with extensions, or the
29112 only difference in features over the recorded match is
29113 features that were optional and are now mandatory. */
29114 if (p_ver_ret
== NULL
29115 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
29121 else if (p_ver_ret
== NULL
)
29123 arm_feature_set needed_ext_fset
;
29125 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
29127 /* Architecture has all features needed when using some
29128 extensions. Record it and continue searching in case there
29129 exist an architecture providing all needed features without
29130 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
29132 if (have_ext_for_needed_feat_p (&known_arch_fset
,
29139 if (p_ver_ret
== NULL
)
29143 /* Tag_CPU_arch_profile. */
29144 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
29145 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
29146 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
29147 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
29149 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
29151 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
29155 return p_ver_ret
->val
;
29158 /* Set the public EABI object attributes. */
29161 aeabi_set_public_attributes (void)
29163 char profile
= '\0';
29166 int fp16_optional
= 0;
29167 int skip_exact_match
= 0;
29168 arm_feature_set flags
, flags_arch
, flags_ext
;
29170 /* Autodetection mode, choose the architecture based the instructions
29172 if (no_cpu_selected ())
29174 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
29176 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
29177 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
29179 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
29180 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
29182 /* Code run during relaxation relies on selected_cpu being set. */
29183 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29184 flags_ext
= arm_arch_none
;
29185 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
29186 selected_ext
= flags_ext
;
29187 selected_cpu
= flags
;
29189 /* Otherwise, choose the architecture based on the capabilities of the
29193 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
29194 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
29195 flags_ext
= selected_ext
;
29196 flags
= selected_cpu
;
29198 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
29200 /* Allow the user to override the reported architecture. */
29201 if (!ARM_FEATURE_ZERO (selected_object_arch
))
29203 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
29204 flags_ext
= arm_arch_none
;
29207 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
29209 /* When this function is run again after relaxation has happened there is no
29210 way to determine whether an architecture or CPU was specified by the user:
29211 - selected_cpu is set above for relaxation to work;
29212 - march_cpu_opt is not set if only -mcpu or .cpu is used;
29213 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
29214 Therefore, if not in -march=all case we first try an exact match and fall
29215 back to autodetection. */
29216 if (!skip_exact_match
)
29217 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
29219 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
29221 as_bad (_("no architecture contains all the instructions used\n"));
29223 /* Tag_CPU_name. */
29224 if (selected_cpu_name
[0])
29228 q
= selected_cpu_name
;
29229 if (strncmp (q
, "armv", 4) == 0)
29234 for (i
= 0; q
[i
]; i
++)
29235 q
[i
] = TOUPPER (q
[i
]);
29237 aeabi_set_attribute_string (Tag_CPU_name
, q
);
29240 /* Tag_CPU_arch. */
29241 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
29243 /* Tag_CPU_arch_profile. */
29244 if (profile
!= '\0')
29245 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
29247 /* Tag_DSP_extension. */
29248 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
29249 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
29251 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29252 /* Tag_ARM_ISA_use. */
29253 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
29254 || ARM_FEATURE_ZERO (flags_arch
))
29255 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
29257 /* Tag_THUMB_ISA_use. */
29258 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
29259 || ARM_FEATURE_ZERO (flags_arch
))
29263 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29264 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
29266 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
29270 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
29273 /* Tag_VFP_arch. */
29274 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
29275 aeabi_set_attribute_int (Tag_VFP_arch
,
29276 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29278 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
29279 aeabi_set_attribute_int (Tag_VFP_arch
,
29280 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29282 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
29285 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
29287 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
29289 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
29292 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
29293 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
29294 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
29295 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
29296 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
29298 /* Tag_ABI_HardFP_use. */
29299 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
29300 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
29301 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
29303 /* Tag_WMMX_arch. */
29304 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
29305 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
29306 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
29307 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
29309 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
29310 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
29311 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
29312 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
29313 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
29314 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
29316 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
29318 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
29322 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
29327 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
29328 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
29329 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
29330 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
29332 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
29333 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
29334 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
29338 We set Tag_DIV_use to two when integer divide instructions have been used
29339 in ARM state, or when Thumb integer divide instructions have been used,
29340 but we have no architecture profile set, nor have we any ARM instructions.
29342 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
29343 by the base architecture.
29345 For new architectures we will have to check these tests. */
29346 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29347 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29348 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
29349 aeabi_set_attribute_int (Tag_DIV_use
, 0);
29350 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
29351 || (profile
== '\0'
29352 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
29353 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
29354 aeabi_set_attribute_int (Tag_DIV_use
, 2);
29356 /* Tag_MP_extension_use. */
29357 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
29358 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
29360 /* Tag Virtualization_use. */
29361 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
29363 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
29366 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
29369 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29370 finished and free extension feature bits which will not be used anymore. */
29373 arm_md_post_relax (void)
29375 aeabi_set_public_attributes ();
29376 XDELETE (mcpu_ext_opt
);
29377 mcpu_ext_opt
= NULL
;
29378 XDELETE (march_ext_opt
);
29379 march_ext_opt
= NULL
;
29382 /* Add the default contents for the .ARM.attributes section. */
29387 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
29390 aeabi_set_public_attributes ();
29392 #endif /* OBJ_ELF */
29394 /* Parse a .cpu directive. */
29397 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
29399 const struct arm_cpu_option_table
*opt
;
29403 name
= input_line_pointer
;
29404 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29405 input_line_pointer
++;
29406 saved_char
= *input_line_pointer
;
29407 *input_line_pointer
= 0;
29409 /* Skip the first "all" entry. */
29410 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
29411 if (streq (opt
->name
, name
))
29413 selected_arch
= opt
->value
;
29414 selected_ext
= opt
->ext
;
29415 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29416 if (opt
->canonical_name
)
29417 strcpy (selected_cpu_name
, opt
->canonical_name
);
29421 for (i
= 0; opt
->name
[i
]; i
++)
29422 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
29424 selected_cpu_name
[i
] = 0;
29426 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29428 *input_line_pointer
= saved_char
;
29429 demand_empty_rest_of_line ();
29432 as_bad (_("unknown cpu `%s'"), name
);
29433 *input_line_pointer
= saved_char
;
29434 ignore_rest_of_line ();
29437 /* Parse a .arch directive. */
29440 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
29442 const struct arm_arch_option_table
*opt
;
29446 name
= input_line_pointer
;
29447 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29448 input_line_pointer
++;
29449 saved_char
= *input_line_pointer
;
29450 *input_line_pointer
= 0;
29452 /* Skip the first "all" entry. */
29453 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29454 if (streq (opt
->name
, name
))
29456 selected_arch
= opt
->value
;
29457 selected_ext
= arm_arch_none
;
29458 selected_cpu
= selected_arch
;
29459 strcpy (selected_cpu_name
, opt
->name
);
29460 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29461 *input_line_pointer
= saved_char
;
29462 demand_empty_rest_of_line ();
29466 as_bad (_("unknown architecture `%s'\n"), name
);
29467 *input_line_pointer
= saved_char
;
29468 ignore_rest_of_line ();
29471 /* Parse a .object_arch directive. */
29474 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
29476 const struct arm_arch_option_table
*opt
;
29480 name
= input_line_pointer
;
29481 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29482 input_line_pointer
++;
29483 saved_char
= *input_line_pointer
;
29484 *input_line_pointer
= 0;
29486 /* Skip the first "all" entry. */
29487 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29488 if (streq (opt
->name
, name
))
29490 selected_object_arch
= opt
->value
;
29491 *input_line_pointer
= saved_char
;
29492 demand_empty_rest_of_line ();
29496 as_bad (_("unknown architecture `%s'\n"), name
);
29497 *input_line_pointer
= saved_char
;
29498 ignore_rest_of_line ();
29501 /* Parse a .arch_extension directive. */
29504 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
29506 const struct arm_option_extension_value_table
*opt
;
29509 int adding_value
= 1;
29511 name
= input_line_pointer
;
29512 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29513 input_line_pointer
++;
29514 saved_char
= *input_line_pointer
;
29515 *input_line_pointer
= 0;
29517 if (strlen (name
) >= 2
29518 && strncmp (name
, "no", 2) == 0)
29524 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29525 if (streq (opt
->name
, name
))
29527 int i
, nb_allowed_archs
=
29528 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
29529 for (i
= 0; i
< nb_allowed_archs
; i
++)
29532 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
29534 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
29538 if (i
== nb_allowed_archs
)
29540 as_bad (_("architectural extension `%s' is not allowed for the "
29541 "current base architecture"), name
);
29546 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
29549 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
29551 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29552 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29553 *input_line_pointer
= saved_char
;
29554 demand_empty_rest_of_line ();
29555 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29556 on this return so that duplicate extensions (extensions with the
29557 same name as a previous extension in the list) are not considered
29558 for command-line parsing. */
29562 if (opt
->name
== NULL
)
29563 as_bad (_("unknown architecture extension `%s'\n"), name
);
29565 *input_line_pointer
= saved_char
;
29566 ignore_rest_of_line ();
29569 /* Parse a .fpu directive. */
29572 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
29574 const struct arm_option_fpu_value_table
*opt
;
29578 name
= input_line_pointer
;
29579 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29580 input_line_pointer
++;
29581 saved_char
= *input_line_pointer
;
29582 *input_line_pointer
= 0;
29584 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
29585 if (streq (opt
->name
, name
))
29587 selected_fpu
= opt
->value
;
29588 #ifndef CPU_DEFAULT
29589 if (no_cpu_selected ())
29590 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
29593 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29594 *input_line_pointer
= saved_char
;
29595 demand_empty_rest_of_line ();
29599 as_bad (_("unknown floating point format `%s'\n"), name
);
29600 *input_line_pointer
= saved_char
;
29601 ignore_rest_of_line ();
29604 /* Copy symbol information. */
29607 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
29609 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
29613 /* Given a symbolic attribute NAME, return the proper integer value.
29614 Returns -1 if the attribute is not known. */
29617 arm_convert_symbolic_attribute (const char *name
)
29619 static const struct
29624 attribute_table
[] =
29626 /* When you modify this table you should
29627 also modify the list in doc/c-arm.texi. */
29628 #define T(tag) {#tag, tag}
29629 T (Tag_CPU_raw_name
),
29632 T (Tag_CPU_arch_profile
),
29633 T (Tag_ARM_ISA_use
),
29634 T (Tag_THUMB_ISA_use
),
29638 T (Tag_Advanced_SIMD_arch
),
29639 T (Tag_PCS_config
),
29640 T (Tag_ABI_PCS_R9_use
),
29641 T (Tag_ABI_PCS_RW_data
),
29642 T (Tag_ABI_PCS_RO_data
),
29643 T (Tag_ABI_PCS_GOT_use
),
29644 T (Tag_ABI_PCS_wchar_t
),
29645 T (Tag_ABI_FP_rounding
),
29646 T (Tag_ABI_FP_denormal
),
29647 T (Tag_ABI_FP_exceptions
),
29648 T (Tag_ABI_FP_user_exceptions
),
29649 T (Tag_ABI_FP_number_model
),
29650 T (Tag_ABI_align_needed
),
29651 T (Tag_ABI_align8_needed
),
29652 T (Tag_ABI_align_preserved
),
29653 T (Tag_ABI_align8_preserved
),
29654 T (Tag_ABI_enum_size
),
29655 T (Tag_ABI_HardFP_use
),
29656 T (Tag_ABI_VFP_args
),
29657 T (Tag_ABI_WMMX_args
),
29658 T (Tag_ABI_optimization_goals
),
29659 T (Tag_ABI_FP_optimization_goals
),
29660 T (Tag_compatibility
),
29661 T (Tag_CPU_unaligned_access
),
29662 T (Tag_FP_HP_extension
),
29663 T (Tag_VFP_HP_extension
),
29664 T (Tag_ABI_FP_16bit_format
),
29665 T (Tag_MPextension_use
),
29667 T (Tag_nodefaults
),
29668 T (Tag_also_compatible_with
),
29669 T (Tag_conformance
),
29671 T (Tag_Virtualization_use
),
29672 T (Tag_DSP_extension
),
29674 /* We deliberately do not include Tag_MPextension_use_legacy. */
29682 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
29683 if (streq (name
, attribute_table
[i
].name
))
29684 return attribute_table
[i
].tag
;
29689 /* Apply sym value for relocations only in the case that they are for
29690 local symbols in the same segment as the fixup and you have the
29691 respective architectural feature for blx and simple switches. */
29694 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
29697 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
29698 /* PR 17444: If the local symbol is in a different section then a reloc
29699 will always be generated for it, so applying the symbol value now
29700 will result in a double offset being stored in the relocation. */
29701 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
29702 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
29704 switch (fixP
->fx_r_type
)
29706 case BFD_RELOC_ARM_PCREL_BLX
:
29707 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
29708 if (ARM_IS_FUNC (fixP
->fx_addsy
))
29712 case BFD_RELOC_ARM_PCREL_CALL
:
29713 case BFD_RELOC_THUMB_PCREL_BLX
:
29714 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
29725 #endif /* OBJ_ELF */