1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
along with GAS; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.  */
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
189 static const arm_feature_set arm_ext_v6_notm
=
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
191 static const arm_feature_set arm_ext_v6_dsp
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
193 static const arm_feature_set arm_ext_barrier
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
195 static const arm_feature_set arm_ext_msr
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
197 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
198 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
199 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
200 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
201 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
202 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
203 static const arm_feature_set arm_ext_m
=
204 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
, ARM_EXT2_V8M
);
205 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
206 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
207 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
208 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
209 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
210 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
211 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
212 static const arm_feature_set arm_ext_v6t2_v8m
=
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics
=
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
217 static const arm_feature_set arm_ext_v8_2
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
223 static const arm_feature_set arm_arch_any
= ARM_ANY
;
224 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
226 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
227 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
229 static const arm_feature_set arm_cext_iwmmxt2
=
230 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
231 static const arm_feature_set arm_cext_iwmmxt
=
232 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
233 static const arm_feature_set arm_cext_xscale
=
234 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
235 static const arm_feature_set arm_cext_maverick
=
236 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
237 static const arm_feature_set fpu_fpa_ext_v1
=
238 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
239 static const arm_feature_set fpu_fpa_ext_v2
=
240 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
241 static const arm_feature_set fpu_vfp_ext_v1xd
=
242 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
243 static const arm_feature_set fpu_vfp_ext_v1
=
244 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
245 static const arm_feature_set fpu_vfp_ext_v2
=
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
247 static const arm_feature_set fpu_vfp_ext_v3xd
=
248 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
249 static const arm_feature_set fpu_vfp_ext_v3
=
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
251 static const arm_feature_set fpu_vfp_ext_d32
=
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
253 static const arm_feature_set fpu_neon_ext_v1
=
254 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
255 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
257 static const arm_feature_set fpu_vfp_fp16
=
258 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
259 static const arm_feature_set fpu_neon_ext_fma
=
260 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
261 static const arm_feature_set fpu_vfp_ext_fma
=
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
263 static const arm_feature_set fpu_vfp_ext_armv8
=
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
265 static const arm_feature_set fpu_vfp_ext_armv8xd
=
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
267 static const arm_feature_set fpu_neon_ext_armv8
=
268 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
269 static const arm_feature_set fpu_crypto_ext_armv8
=
270 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
271 static const arm_feature_set crc_ext_armv8
=
272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
273 static const arm_feature_set fpu_neon_ext_v8_1
=
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
| FPU_NEON_EXT_RDMA
);
276 static int mfloat_abi_opt
= -1;
277 /* Record user cpu selection for object attributes. */
278 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
279 /* Must be long enough to hold any of the names in arm_cpus. */
280 static char selected_cpu_name
[20];
282 extern FLONUM_TYPE generic_floating_point_number
;
284 /* Return if no cpu was selected on command-line. */
286 no_cpu_selected (void)
288 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
293 static int meabi_flags
= EABI_DEFAULT
;
295 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
298 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
303 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
308 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
309 symbolS
* GOT_symbol
;
/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
332 /* If unified_syntax is true, we are processing the new unified
333 ARM/Thumb syntax. Important differences from the old ARM mode:
335 - Immediate operands do not require a # prefix.
336 - Conditional affixes always appear at the end of the
337 instruction. (For backward compatibility, those instructions
338 that formerly had them in the middle, continue to accept them
340 - The IT instruction may appear, and if it does is validated
341 against subsequent conditional affixes. It does not generate
344 Important differences from the old Thumb mode:
346 - Immediate operands do not require a # prefix.
347 - Most of the V6T2 instructions are only available in unified mode.
348 - The .N and .W suffixes are recognized and honored (it is an error
349 if they cannot be honored).
350 - All instructions set the flags if and only if they have an 's' affix.
351 - Conditional affixes may be used. They are validated against
352 preceding IT instructions. Unlike ARM mode, you cannot use a
353 conditional affix except in the scope of an IT instruction. */
355 static bfd_boolean unified_syntax
= FALSE
;
/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
376 enum neon_el_type type
;
380 #define NEON_MAX_TYPE_ELS 4
384 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
388 enum it_instruction_type
393 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
394 if inside, should be the last one. */
395 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
396 i.e. BKPT and NOP. */
397 IT_INSN
/* The IT insn has been parsed. */
400 /* The maximum number of operands we need. */
401 #define ARM_IT_MAX_OPERANDS 6
406 unsigned long instruction
;
410 /* "uncond_value" is set to the value in place of the conditional field in
411 unconditional versions of the instruction, or -1 if nothing is
414 struct neon_type vectype
;
415 /* This does not indicate an actual NEON instruction, only that
416 the mnemonic accepts neon-style type suffixes. */
418 /* Set to the opcode if the instruction needs relaxation.
419 Zero if the instruction is not relaxed. */
423 bfd_reloc_code_real_type type
;
428 enum it_instruction_type it_insn_type
;
434 struct neon_type_el vectype
;
435 unsigned present
: 1; /* Operand present. */
436 unsigned isreg
: 1; /* Operand was a register. */
437 unsigned immisreg
: 1; /* .imm field is a second register. */
438 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
439 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
440 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
441 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
442 instructions. This allows us to disambiguate ARM <-> vector insns. */
443 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
444 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
445 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
446 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
447 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
448 unsigned writeback
: 1; /* Operand has trailing ! */
449 unsigned preind
: 1; /* Preindexed address. */
450 unsigned postind
: 1; /* Postindexed address. */
451 unsigned negative
: 1; /* Index register was negated. */
452 unsigned shifted
: 1; /* Shift applied to operation. */
453 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
454 } operands
[ARM_IT_MAX_OPERANDS
];
457 static struct arm_it inst
;
459 #define NUM_FLOAT_VALS 8
461 const char * fp_const
[] =
463 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
466 /* Number of littlenums required to hold an extended precision number. */
467 #define MAX_LITTLENUMS 6
469 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
479 #define CP_T_X 0x00008000
480 #define CP_T_Y 0x00400000
482 #define CONDS_BIT 0x00100000
483 #define LOAD_BIT 0x00100000
485 #define DOUBLE_LOAD_FLAG 0x00000001
489 const char * template_name
;
493 #define COND_ALWAYS 0xE
497 const char * template_name
;
501 struct asm_barrier_opt
503 const char * template_name
;
505 const arm_feature_set arch
;
508 /* The bit that distinguishes CPSR and SPSR. */
509 #define SPSR_BIT (1 << 22)
511 /* The individual PSR flag bits. */
512 #define PSR_c (1 << 16)
513 #define PSR_x (1 << 17)
514 #define PSR_s (1 << 18)
515 #define PSR_f (1 << 19)
520 bfd_reloc_code_real_type reloc
;
525 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
526 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
531 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
534 /* Bits for DEFINED field in neon_typed_alias. */
535 #define NTA_HASTYPE 1
536 #define NTA_HASINDEX 2
538 struct neon_typed_alias
540 unsigned char defined
;
542 struct neon_type_el eltype
;
545 /* ARM register categories. This includes coprocessor numbers and various
546 architecture extensions' registers. */
573 /* Structure for a hash table entry for a register.
574 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
575 information which states whether a vector type or index is specified (for a
576 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
582 unsigned char builtin
;
583 struct neon_typed_alias
* neon
;
586 /* Diagnostics used when we don't get a register of the expected type. */
587 const char * const reg_expected_msgs
[] =
589 N_("ARM register expected"),
590 N_("bad or missing co-processor number"),
591 N_("co-processor register expected"),
592 N_("FPA register expected"),
593 N_("VFP single precision register expected"),
594 N_("VFP/Neon double precision register expected"),
595 N_("Neon quad precision register expected"),
596 N_("VFP single or double precision register expected"),
597 N_("Neon double or quad precision register expected"),
598 N_("VFP single, double or Neon quad precision register expected"),
599 N_("VFP system register expected"),
600 N_("Maverick MVF register expected"),
601 N_("Maverick MVD register expected"),
602 N_("Maverick MVFX register expected"),
603 N_("Maverick MVDX register expected"),
604 N_("Maverick MVAX register expected"),
605 N_("Maverick DSPSC register expected"),
606 N_("iWMMXt data register expected"),
607 N_("iWMMXt control register expected"),
608 N_("iWMMXt scalar register expected"),
609 N_("XScale accumulator register expected"),
612 /* Some well known registers that we refer to directly elsewhere. */
618 /* ARM instructions take 4bytes in the object file, Thumb instructions
624 /* Basic string to match. */
625 const char * template_name
;
627 /* Parameters to instruction. */
628 unsigned int operands
[8];
630 /* Conditional tag - see opcode_lookup. */
631 unsigned int tag
: 4;
633 /* Basic instruction code. */
634 unsigned int avalue
: 28;
636 /* Thumb-format instruction code. */
639 /* Which architecture variant provides this instruction. */
640 const arm_feature_set
* avariant
;
641 const arm_feature_set
* tvariant
;
643 /* Function to call to encode instruction in ARM format. */
644 void (* aencode
) (void);
646 /* Function to call to encode instruction in Thumb format. */
647 void (* tencode
) (void);
650 /* Defines for various bits that we will want to toggle. */
651 #define INST_IMMEDIATE 0x02000000
652 #define OFFSET_REG 0x02000000
653 #define HWOFFSET_IMM 0x00400000
654 #define SHIFT_BY_REG 0x00000010
655 #define PRE_INDEX 0x01000000
656 #define INDEX_UP 0x00800000
657 #define WRITE_BACK 0x00200000
658 #define LDM_TYPE_2_OR_3 0x00400000
659 #define CPSI_MMOD 0x00020000
661 #define LITERAL_MASK 0xf000f000
662 #define OPCODE_MASK 0xfe1fffff
663 #define V4_STR_BIT 0x00000020
664 #define VLDR_VMOV_SAME 0x0040f000
666 #define T2_SUBS_PC_LR 0xf3de8f00
668 #define DATA_OP_SHIFT 21
670 #define T2_OPCODE_MASK 0xfe1fffff
671 #define T2_DATA_OP_SHIFT 21
673 #define A_COND_MASK 0xf0000000
674 #define A_PUSH_POP_OP_MASK 0x0fff0000
676 /* Opcodes for pushing/poping registers to/from the stack. */
677 #define A1_OPCODE_PUSH 0x092d0000
678 #define A2_OPCODE_PUSH 0x052d0004
679 #define A2_OPCODE_POP 0x049d0004
681 /* Codes to distinguish the arithmetic instructions. */
692 #define OPCODE_CMP 10
693 #define OPCODE_CMN 11
694 #define OPCODE_ORR 12
695 #define OPCODE_MOV 13
696 #define OPCODE_BIC 14
697 #define OPCODE_MVN 15
699 #define T2_OPCODE_AND 0
700 #define T2_OPCODE_BIC 1
701 #define T2_OPCODE_ORR 2
702 #define T2_OPCODE_ORN 3
703 #define T2_OPCODE_EOR 4
704 #define T2_OPCODE_ADD 8
705 #define T2_OPCODE_ADC 10
706 #define T2_OPCODE_SBC 11
707 #define T2_OPCODE_SUB 13
708 #define T2_OPCODE_RSB 14
710 #define T_OPCODE_MUL 0x4340
711 #define T_OPCODE_TST 0x4200
712 #define T_OPCODE_CMN 0x42c0
713 #define T_OPCODE_NEG 0x4240
714 #define T_OPCODE_MVN 0x43c0
716 #define T_OPCODE_ADD_R3 0x1800
717 #define T_OPCODE_SUB_R3 0x1a00
718 #define T_OPCODE_ADD_HI 0x4400
719 #define T_OPCODE_ADD_ST 0xb000
720 #define T_OPCODE_SUB_ST 0xb080
721 #define T_OPCODE_ADD_SP 0xa800
722 #define T_OPCODE_ADD_PC 0xa000
723 #define T_OPCODE_ADD_I8 0x3000
724 #define T_OPCODE_SUB_I8 0x3800
725 #define T_OPCODE_ADD_I3 0x1c00
726 #define T_OPCODE_SUB_I3 0x1e00
728 #define T_OPCODE_ASR_R 0x4100
729 #define T_OPCODE_LSL_R 0x4080
730 #define T_OPCODE_LSR_R 0x40c0
731 #define T_OPCODE_ROR_R 0x41c0
732 #define T_OPCODE_ASR_I 0x1000
733 #define T_OPCODE_LSL_I 0x0000
734 #define T_OPCODE_LSR_I 0x0800
736 #define T_OPCODE_MOV_I8 0x2000
737 #define T_OPCODE_CMP_I8 0x2800
738 #define T_OPCODE_CMP_LR 0x4280
739 #define T_OPCODE_MOV_HR 0x4600
740 #define T_OPCODE_CMP_HR 0x4500
742 #define T_OPCODE_LDR_PC 0x4800
743 #define T_OPCODE_LDR_SP 0x9800
744 #define T_OPCODE_STR_SP 0x9000
745 #define T_OPCODE_LDR_IW 0x6800
746 #define T_OPCODE_STR_IW 0x6000
747 #define T_OPCODE_LDR_IH 0x8800
748 #define T_OPCODE_STR_IH 0x8000
749 #define T_OPCODE_LDR_IB 0x7800
750 #define T_OPCODE_STR_IB 0x7000
751 #define T_OPCODE_LDR_RW 0x5800
752 #define T_OPCODE_STR_RW 0x5000
753 #define T_OPCODE_LDR_RH 0x5a00
754 #define T_OPCODE_STR_RH 0x5200
755 #define T_OPCODE_LDR_RB 0x5c00
756 #define T_OPCODE_STR_RB 0x5400
758 #define T_OPCODE_PUSH 0xb400
759 #define T_OPCODE_POP 0xbc00
761 #define T_OPCODE_BRANCH 0xe000
763 #define THUMB_SIZE 2 /* Size of thumb instruction. */
764 #define THUMB_PP_PC_LR 0x0100
765 #define THUMB_LOAD_BIT 0x0800
766 #define THUMB2_LOAD_BIT 0x00100000
768 #define BAD_ARGS _("bad arguments to instruction")
769 #define BAD_SP _("r13 not allowed here")
770 #define BAD_PC _("r15 not allowed here")
771 #define BAD_COND _("instruction cannot be conditional")
772 #define BAD_OVERLAP _("registers may not be the same")
773 #define BAD_HIREG _("lo register required")
774 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* BUGFIX: removed a stray trailing ';' from this macro.  Every other
   BAD_* diagnostic here expands to a bare expression; the semicolon
   would break uses of BAD_ADDR_MODE inside larger expressions (e.g.
   conditional operators) and if/else chains.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
776 #define BAD_BRANCH _("branch must be last instruction in IT block")
777 #define BAD_NOT_IT _("instruction not allowed in IT block")
778 #define BAD_FPU _("selected FPU does not support instruction")
779 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
780 #define BAD_IT_COND _("incorrect condition in IT block")
781 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
782 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
783 #define BAD_PC_ADDRESSING \
784 _("cannot use register index with PC-relative addressing")
785 #define BAD_PC_WRITEBACK \
786 _("cannot use writeback with PC-relative addressing")
787 #define BAD_RANGE _("branch out of range")
788 #define BAD_FP16 _("selected processor does not support fp16 instruction")
789 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
/* Hash tables used for name lookup while parsing.  Only arm_reg_hsh is
   visibly consulted in this chunk (see arm_reg_parse_multi); the others
   appear to be parallel namespaces for mnemonics, condition codes,
   shifts, PSR fields, relocations and barrier options -- confirm
   against the table-population code elsewhere in the file.  */
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
800 /* Stuff needed to resolve the label ambiguity
809 symbolS
* last_label_seen
;
810 static int label_is_thumb_function_name
= FALSE
;
812 /* Literal pool structure. Held on a per-section
813 and per-sub-section basis. */
815 #define MAX_LITERAL_POOL_SIZE 1024
816 typedef struct literal_pool
818 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
819 unsigned int next_free_entry
;
825 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
827 struct literal_pool
* next
;
828 unsigned int alignment
;
831 /* Pointer to a linked list of literal pools. */
832 literal_pool
* list_of_pools
= NULL
;
834 typedef enum asmfunc_states
837 WAITING_ASMFUNC_NAME
,
841 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
844 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
846 static struct current_it now_it
;
850 now_it_compatible (int cond
)
852 return (cond
& ~1) == (now_it
.cc
& ~1);
856 conditional_insn (void)
858 return inst
.cond
!= COND_ALWAYS
;
861 static int in_it_block (void);
863 static int handle_it_state (void);
865 static void force_automatic_it_block_close (void);
867 static void it_fsm_post_encode (void);
869 #define set_it_insn_type(type) \
872 inst.it_insn_type = type; \
873 if (handle_it_state () == FAIL) \
878 #define set_it_insn_type_nonvoid(type, failret) \
881 inst.it_insn_type = type; \
882 if (handle_it_state () == FAIL) \
887 #define set_it_insn_type_last() \
890 if (inst.cond == COND_ALWAYS) \
891 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
893 set_it_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning
   of a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand-checks for '#' at the beginning of the
   first line of the input file, because the compiler outputs #NO_APP
   at the beginning of its output.
   Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";
/* Characters which separate the mantissa from the exponent in a
   floating point number.  */
const char EXP_CHARS[] = "eE";

/* Characters which indicate that a number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
924 /* Prefix characters that indicate the start of an immediate
/* Prefix characters that indicate the start of an immediate operand.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  Note: a single conditional, not a
   loop -- skips at most one leading space per invocation.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
933 skip_past_char (char ** str
, char c
)
935 /* PR gas/14987: Allow for whitespace before the expected character. */
936 skip_whitespace (*str
);
947 #define skip_past_comma(str) skip_past_char (str, ',')
949 /* Arithmetic expressions (possibly involving symbols). */
951 /* Return TRUE if anything in the expression is a bignum. */
954 walk_no_bignums (symbolS
* sp
)
956 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
959 if (symbol_get_value_expression (sp
)->X_add_symbol
)
961 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
962 || (symbol_get_value_expression (sp
)->X_op_symbol
963 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
969 static int in_my_get_expression
= 0;
971 /* Third argument to my_get_expression. */
972 #define GE_NO_PREFIX 0
973 #define GE_IMM_PREFIX 1
974 #define GE_OPT_PREFIX 2
975 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
976 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
977 #define GE_OPT_PREFIX_BIG 3
980 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
985 /* In unified syntax, all prefixes are optional. */
987 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
992 case GE_NO_PREFIX
: break;
994 if (!is_immediate_prefix (**str
))
996 inst
.error
= _("immediate expression requires a # prefix");
1002 case GE_OPT_PREFIX_BIG
:
1003 if (is_immediate_prefix (**str
))
1009 memset (ep
, 0, sizeof (expressionS
));
1011 save_in
= input_line_pointer
;
1012 input_line_pointer
= *str
;
1013 in_my_get_expression
= 1;
1014 seg
= expression (ep
);
1015 in_my_get_expression
= 0;
1017 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1019 /* We found a bad or missing expression in md_operand(). */
1020 *str
= input_line_pointer
;
1021 input_line_pointer
= save_in
;
1022 if (inst
.error
== NULL
)
1023 inst
.error
= (ep
->X_op
== O_absent
1024 ? _("missing expression") :_("bad expression"));
1029 if (seg
!= absolute_section
1030 && seg
!= text_section
1031 && seg
!= data_section
1032 && seg
!= bss_section
1033 && seg
!= undefined_section
)
1035 inst
.error
= _("bad segment");
1036 *str
= input_line_pointer
;
1037 input_line_pointer
= save_in
;
1044 /* Get rid of any bignums now, so that we don't generate an error for which
1045 we can't establish a line number later on. Big numbers are never valid
1046 in instructions, which is where this routine is always called. */
1047 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1048 && (ep
->X_op
== O_big
1049 || (ep
->X_add_symbol
1050 && (walk_no_bignums (ep
->X_add_symbol
)
1052 && walk_no_bignums (ep
->X_op_symbol
))))))
1054 inst
.error
= _("invalid constant");
1055 *str
= input_line_pointer
;
1056 input_line_pointer
= save_in
;
1060 *str
= input_line_pointer
;
1061 input_line_pointer
= save_in
;
1065 /* Turn a string in input_line_pointer into a floating point constant
1066 of type TYPE, and store the appropriate bytes in *LITP. The number
1067 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1068 returned, or NULL on OK.
1070 Note that fp constants aren't represent in the normal way on the ARM.
1071 In big endian mode, things are as expected. However, in little endian
1072 mode fp constants are big-endian word-wise, and little-endian byte-wise
1073 within the words. For example, (double) 1.1 in big endian mode is
1074 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1075 the byte sequence 99 99 f1 3f 9a 99 99 99.
1077 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1080 md_atof (int type
, char * litP
, int * sizeP
)
1083 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1115 return _("Unrecognized or unsupported floating point constant");
1118 t
= atof_ieee (input_line_pointer
, type
, words
);
1120 input_line_pointer
= t
;
1121 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1123 if (target_big_endian
)
1125 for (i
= 0; i
< prec
; i
++)
1127 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1128 litP
+= sizeof (LITTLENUM_TYPE
);
1133 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1134 for (i
= prec
- 1; i
>= 0; i
--)
1136 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1137 litP
+= sizeof (LITTLENUM_TYPE
);
1140 /* For a 4 byte float the order of elements in `words' is 1 0.
1141 For an 8 byte float the order is 1 0 3 2. */
1142 for (i
= 0; i
< prec
; i
+= 2)
1144 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1145 sizeof (LITTLENUM_TYPE
));
1146 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1147 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1148 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1155 /* We handle all bad expressions here, so that we can report the faulty
1156 instruction in the error message. */
1158 md_operand (expressionS
* exp
)
1160 if (in_my_get_expression
)
1161 exp
->X_op
= O_illegal
;
1164 /* Immediate values. */
1166 /* Generic immediate-value read function for use in directives.
1167 Accepts anything that 'expression' can fold to a constant.
1168 *val receives the number. */
1171 immediate_for_directive (int *val
)
1174 exp
.X_op
= O_illegal
;
1176 if (is_immediate_prefix (*input_line_pointer
))
1178 input_line_pointer
++;
1182 if (exp
.X_op
!= O_constant
)
1184 as_bad (_("expected #constant"));
1185 ignore_rest_of_line ();
1188 *val
= exp
.X_add_number
;
1193 /* Register parsing. */
1195 /* Generic register parser. CCP points to what should be the
1196 beginning of a register name. If it is indeed a valid register
1197 name, advance CCP over it and return the reg_entry structure;
1198 otherwise return NULL. Does not issue diagnostics. */
1200 static struct reg_entry
*
1201 arm_reg_parse_multi (char **ccp
)
1205 struct reg_entry
*reg
;
1207 skip_whitespace (start
);
1209 #ifdef REGISTER_PREFIX
1210 if (*start
!= REGISTER_PREFIX
)
1214 #ifdef OPTIONAL_REGISTER_PREFIX
1215 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1220 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1225 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1227 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1237 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1238 enum arm_reg_type type
)
1240 /* Alternative syntaxes are accepted for a few register classes. */
1247 /* Generic coprocessor register names are allowed for these. */
1248 if (reg
&& reg
->type
== REG_TYPE_CN
)
1253 /* For backward compatibility, a bare number is valid here. */
1255 unsigned long processor
= strtoul (start
, ccp
, 10);
1256 if (*ccp
!= start
&& processor
<= 15)
1260 case REG_TYPE_MMXWC
:
1261 /* WC includes WCG. ??? I'm not sure this is true for all
1262 instructions that take WC registers. */
1263 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1274 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1275 return value is the register number or FAIL. */
1278 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1281 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1284 /* Do not allow a scalar (reg+index) to parse as a register. */
1285 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1288 if (reg
&& reg
->type
== type
)
1291 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1298 /* Parse a Neon type specifier. *STR should point at the leading '.'
1299 character. Does no verification at this stage that the type fits the opcode
1306 Can all be legally parsed by this function.
1308 Fills in neon_type struct pointer with parsed information, and updates STR
1309 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1310 type, FAIL if not. */
1313 parse_neon_type (struct neon_type
*type
, char **str
)
1320 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1322 enum neon_el_type thistype
= NT_untyped
;
1323 unsigned thissize
= -1u;
1330 /* Just a size without an explicit type. */
1334 switch (TOLOWER (*ptr
))
1336 case 'i': thistype
= NT_integer
; break;
1337 case 'f': thistype
= NT_float
; break;
1338 case 'p': thistype
= NT_poly
; break;
1339 case 's': thistype
= NT_signed
; break;
1340 case 'u': thistype
= NT_unsigned
; break;
1342 thistype
= NT_float
;
1347 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1353 /* .f is an abbreviation for .f32. */
1354 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1359 thissize
= strtoul (ptr
, &ptr
, 10);
1361 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1364 as_bad (_("bad size %d in type specifier"), thissize
);
1372 type
->el
[type
->elems
].type
= thistype
;
1373 type
->el
[type
->elems
].size
= thissize
;
1378 /* Empty/missing type is not a successful parse. */
1379 if (type
->elems
== 0)
1387 /* Errors may be set multiple times during parsing or bit encoding
1388 (particularly in the Neon bits), but usually the earliest error which is set
1389 will be the most meaningful. Avoid overwriting it with later (cascading)
1390 errors by calling this function. */
1393 first_error (const char *err
)
1399 /* Parse a single type, e.g. ".s32", leading period included. */
1401 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1404 struct neon_type optype
;
1408 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1410 if (optype
.elems
== 1)
1411 *vectype
= optype
.el
[0];
1414 first_error (_("only one type should be specified for operand"));
1420 first_error (_("vector type expected"));
1432 /* Special meanings for indices (which have a range of 0-7), which will fit into
1435 #define NEON_ALL_LANES 15
1436 #define NEON_INTERLEAVE_LANES 14
1438 /* Parse either a register or a scalar, with an optional type. Return the
1439 register number, and optionally fill in the actual type of the register
1440 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1441 type/index information in *TYPEINFO. */
1444 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1445 enum arm_reg_type
*rtype
,
1446 struct neon_typed_alias
*typeinfo
)
1449 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1450 struct neon_typed_alias atype
;
1451 struct neon_type_el parsetype
;
1455 atype
.eltype
.type
= NT_invtype
;
1456 atype
.eltype
.size
= -1;
1458 /* Try alternate syntax for some types of register. Note these are mutually
1459 exclusive with the Neon syntax extensions. */
1462 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1470 /* Undo polymorphism when a set of register types may be accepted. */
1471 if ((type
== REG_TYPE_NDQ
1472 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1473 || (type
== REG_TYPE_VFSD
1474 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1475 || (type
== REG_TYPE_NSDQ
1476 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1477 || reg
->type
== REG_TYPE_NQ
))
1478 || (type
== REG_TYPE_MMXWC
1479 && (reg
->type
== REG_TYPE_MMXWCG
)))
1480 type
= (enum arm_reg_type
) reg
->type
;
1482 if (type
!= reg
->type
)
1488 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1490 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1492 first_error (_("can't redefine type for operand"));
1495 atype
.defined
|= NTA_HASTYPE
;
1496 atype
.eltype
= parsetype
;
1499 if (skip_past_char (&str
, '[') == SUCCESS
)
1501 if (type
!= REG_TYPE_VFD
)
1503 first_error (_("only D registers may be indexed"));
1507 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1509 first_error (_("can't change index for operand"));
1513 atype
.defined
|= NTA_HASINDEX
;
1515 if (skip_past_char (&str
, ']') == SUCCESS
)
1516 atype
.index
= NEON_ALL_LANES
;
1521 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1523 if (exp
.X_op
!= O_constant
)
1525 first_error (_("constant expression required"));
1529 if (skip_past_char (&str
, ']') == FAIL
)
1532 atype
.index
= exp
.X_add_number
;
1547 /* Like arm_reg_parse, but allow allow the following extra features:
1548 - If RTYPE is non-zero, return the (possibly restricted) type of the
1549 register (e.g. Neon double or quad reg when either has been requested).
1550 - If this is a Neon vector type with additional type information, fill
1551 in the struct pointed to by VECTYPE (if non-NULL).
1552 This function will fault on encountering a scalar. */
1555 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1556 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1558 struct neon_typed_alias atype
;
1560 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1565 /* Do not allow regname(... to parse as a register. */
1569 /* Do not allow a scalar (reg+index) to parse as a register. */
1570 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1572 first_error (_("register operand expected, but got scalar"));
1577 *vectype
= atype
.eltype
;
1584 #define NEON_SCALAR_REG(X) ((X) >> 4)
1585 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1587 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1588 have enough information to be able to do a good job bounds-checking. So, we
1589 just do easy checks here, and do further checks later. */
1592 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1596 struct neon_typed_alias atype
;
1598 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1600 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1603 if (atype
.index
== NEON_ALL_LANES
)
1605 first_error (_("scalar must have an index"));
1608 else if (atype
.index
>= 64 / elsize
)
1610 first_error (_("scalar index out of range"));
1615 *type
= atype
.eltype
;
1619 return reg
* 16 + atype
.index
;
1622 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1625 parse_reg_list (char ** strp
)
1627 char * str
= * strp
;
1631 /* We come back here if we get ranges concatenated by '+' or '|'. */
1634 skip_whitespace (str
);
1648 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1650 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1660 first_error (_("bad range in register list"));
1664 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1666 if (range
& (1 << i
))
1668 (_("Warning: duplicated register (r%d) in register list"),
1676 if (range
& (1 << reg
))
1677 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1679 else if (reg
<= cur_reg
)
1680 as_tsktsk (_("Warning: register range not in ascending order"));
1685 while (skip_past_comma (&str
) != FAIL
1686 || (in_range
= 1, *str
++ == '-'));
1689 if (skip_past_char (&str
, '}') == FAIL
)
1691 first_error (_("missing `}'"));
1699 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1702 if (exp
.X_op
== O_constant
)
1704 if (exp
.X_add_number
1705 != (exp
.X_add_number
& 0x0000ffff))
1707 inst
.error
= _("invalid register mask");
1711 if ((range
& exp
.X_add_number
) != 0)
1713 int regno
= range
& exp
.X_add_number
;
1716 regno
= (1 << regno
) - 1;
1718 (_("Warning: duplicated register (r%d) in register list"),
1722 range
|= exp
.X_add_number
;
1726 if (inst
.reloc
.type
!= 0)
1728 inst
.error
= _("expression too complex");
1732 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1733 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1734 inst
.reloc
.pc_rel
= 0;
1738 if (*str
== '|' || *str
== '+')
1744 while (another_range
);
1750 /* Types of registers in a list. */
1759 /* Parse a VFP register list. If the string is invalid return FAIL.
1760 Otherwise return the number of registers, and set PBASE to the first
1761 register. Parses registers of type ETYPE.
1762 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1763 - Q registers can be used to specify pairs of D registers
1764 - { } can be omitted from around a singleton register list
1765 FIXME: This is not implemented, as it would require backtracking in
1768 This could be done (the meaning isn't really ambiguous), but doesn't
1769 fit in well with the current parsing framework.
1770 - 32 D registers may be used (also true for VFPv3).
1771 FIXME: Types are ignored in these register lists, which is probably a
1775 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1780 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1784 unsigned long mask
= 0;
1787 if (skip_past_char (&str
, '{') == FAIL
)
1789 inst
.error
= _("expecting {");
1796 regtype
= REG_TYPE_VFS
;
1801 regtype
= REG_TYPE_VFD
;
1804 case REGLIST_NEON_D
:
1805 regtype
= REG_TYPE_NDQ
;
1809 if (etype
!= REGLIST_VFP_S
)
1811 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1812 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1816 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1819 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1826 base_reg
= max_regs
;
1830 int setmask
= 1, addregs
= 1;
1832 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1834 if (new_base
== FAIL
)
1836 first_error (_(reg_expected_msgs
[regtype
]));
1840 if (new_base
>= max_regs
)
1842 first_error (_("register out of range in list"));
1846 /* Note: a value of 2 * n is returned for the register Q<n>. */
1847 if (regtype
== REG_TYPE_NQ
)
1853 if (new_base
< base_reg
)
1854 base_reg
= new_base
;
1856 if (mask
& (setmask
<< new_base
))
1858 first_error (_("invalid register list"));
1862 if ((mask
>> new_base
) != 0 && ! warned
)
1864 as_tsktsk (_("register list not in ascending order"));
1868 mask
|= setmask
<< new_base
;
1871 if (*str
== '-') /* We have the start of a range expression */
1877 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1880 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1884 if (high_range
>= max_regs
)
1886 first_error (_("register out of range in list"));
1890 if (regtype
== REG_TYPE_NQ
)
1891 high_range
= high_range
+ 1;
1893 if (high_range
<= new_base
)
1895 inst
.error
= _("register range not in ascending order");
1899 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1901 if (mask
& (setmask
<< new_base
))
1903 inst
.error
= _("invalid register list");
1907 mask
|= setmask
<< new_base
;
1912 while (skip_past_comma (&str
) != FAIL
);
1916 /* Sanity check -- should have raised a parse error above. */
1917 if (count
== 0 || count
> max_regs
)
1922 /* Final test -- the registers must be consecutive. */
1924 for (i
= 0; i
< count
; i
++)
1926 if ((mask
& (1u << i
)) == 0)
1928 inst
.error
= _("non-contiguous register range");
1938 /* True if two alias types are the same. */
1941 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1949 if (a
->defined
!= b
->defined
)
1952 if ((a
->defined
& NTA_HASTYPE
) != 0
1953 && (a
->eltype
.type
!= b
->eltype
.type
1954 || a
->eltype
.size
!= b
->eltype
.size
))
1957 if ((a
->defined
& NTA_HASINDEX
) != 0
1958 && (a
->index
!= b
->index
))
1964 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1965 The base register is put in *PBASE.
1966 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1968 The register stride (minus one) is put in bit 4 of the return value.
1969 Bits [6:5] encode the list length (minus one).
1970 The type of the list elements is put in *ELTYPE, if non-NULL. */
1972 #define NEON_LANE(X) ((X) & 0xf)
1973 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1974 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1977 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1978 struct neon_type_el
*eltype
)
1985 int leading_brace
= 0;
1986 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1987 const char *const incr_error
= _("register stride must be 1 or 2");
1988 const char *const type_error
= _("mismatched element/structure types in list");
1989 struct neon_typed_alias firsttype
;
1991 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1996 struct neon_typed_alias atype
;
1997 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2001 first_error (_(reg_expected_msgs
[rtype
]));
2008 if (rtype
== REG_TYPE_NQ
)
2014 else if (reg_incr
== -1)
2016 reg_incr
= getreg
- base_reg
;
2017 if (reg_incr
< 1 || reg_incr
> 2)
2019 first_error (_(incr_error
));
2023 else if (getreg
!= base_reg
+ reg_incr
* count
)
2025 first_error (_(incr_error
));
2029 if (! neon_alias_types_same (&atype
, &firsttype
))
2031 first_error (_(type_error
));
2035 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2039 struct neon_typed_alias htype
;
2040 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2042 lane
= NEON_INTERLEAVE_LANES
;
2043 else if (lane
!= NEON_INTERLEAVE_LANES
)
2045 first_error (_(type_error
));
2050 else if (reg_incr
!= 1)
2052 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2056 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2059 first_error (_(reg_expected_msgs
[rtype
]));
2062 if (! neon_alias_types_same (&htype
, &firsttype
))
2064 first_error (_(type_error
));
2067 count
+= hireg
+ dregs
- getreg
;
2071 /* If we're using Q registers, we can't use [] or [n] syntax. */
2072 if (rtype
== REG_TYPE_NQ
)
2078 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2082 else if (lane
!= atype
.index
)
2084 first_error (_(type_error
));
2088 else if (lane
== -1)
2089 lane
= NEON_INTERLEAVE_LANES
;
2090 else if (lane
!= NEON_INTERLEAVE_LANES
)
2092 first_error (_(type_error
));
2097 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2099 /* No lane set by [x]. We must be interleaving structures. */
2101 lane
= NEON_INTERLEAVE_LANES
;
2104 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2105 || (count
> 1 && reg_incr
== -1))
2107 first_error (_("error parsing element/structure list"));
2111 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2113 first_error (_("expected }"));
2121 *eltype
= firsttype
.eltype
;
2126 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2129 /* Parse an explicit relocation suffix on an expression. This is
2130 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2131 arm_reloc_hsh contains no entries, so this function can only
2132 succeed if there is no () after the word. Returns -1 on error,
2133 BFD_RELOC_UNUSED if there wasn't any suffix. */
2136 parse_reloc (char **str
)
2138 struct reloc_entry
*r
;
2142 return BFD_RELOC_UNUSED
;
2147 while (*q
&& *q
!= ')' && *q
!= ',')
2152 if ((r
= (struct reloc_entry
*)
2153 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2160 /* Directives: register aliases. */
2162 static struct reg_entry
*
2163 insert_reg_alias (char *str
, unsigned number
, int type
)
2165 struct reg_entry
*new_reg
;
2168 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2170 if (new_reg
->builtin
)
2171 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2173 /* Only warn about a redefinition if it's not defined as the
2175 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2176 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2181 name
= xstrdup (str
);
2182 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2184 new_reg
->name
= name
;
2185 new_reg
->number
= number
;
2186 new_reg
->type
= type
;
2187 new_reg
->builtin
= FALSE
;
2188 new_reg
->neon
= NULL
;
2190 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2197 insert_neon_reg_alias (char *str
, int number
, int type
,
2198 struct neon_typed_alias
*atype
)
2200 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2204 first_error (_("attempt to redefine typed alias"));
2210 reg
->neon
= (struct neon_typed_alias
*)
2211 xmalloc (sizeof (struct neon_typed_alias
));
2212 *reg
->neon
= *atype
;
2216 /* Look for the .req directive. This is of the form:
2218 new_register_name .req existing_register_name
2220 If we find one, or if it looks sufficiently like one that we want to
2221 handle any error here, return TRUE. Otherwise return FALSE. */
2224 create_register_alias (char * newname
, char *p
)
2226 struct reg_entry
*old
;
2227 char *oldname
, *nbuf
;
2230 /* The input scrubber ensures that whitespace after the mnemonic is
2231 collapsed to single spaces. */
2233 if (strncmp (oldname
, " .req ", 6) != 0)
2237 if (*oldname
== '\0')
2240 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2243 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2247 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2248 the desired alias name, and p points to its end. If not, then
2249 the desired alias name is in the global original_case_string. */
2250 #ifdef TC_CASE_SENSITIVE
2253 newname
= original_case_string
;
2254 nlen
= strlen (newname
);
2257 nbuf
= (char *) alloca (nlen
+ 1);
2258 memcpy (nbuf
, newname
, nlen
);
2261 /* Create aliases under the new name as stated; an all-lowercase
2262 version of the new name; and an all-uppercase version of the new
2264 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2266 for (p
= nbuf
; *p
; p
++)
2269 if (strncmp (nbuf
, newname
, nlen
))
2271 /* If this attempt to create an additional alias fails, do not bother
2272 trying to create the all-lower case alias. We will fail and issue
2273 a second, duplicate error message. This situation arises when the
2274 programmer does something like:
2277 The second .req creates the "Foo" alias but then fails to create
2278 the artificial FOO alias because it has already been created by the
2280 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2284 for (p
= nbuf
; *p
; p
++)
2287 if (strncmp (nbuf
, newname
, nlen
))
2288 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2294 /* Create a Neon typed/indexed register alias using directives, e.g.:
2299 These typed registers can be used instead of the types specified after the
2300 Neon mnemonic, so long as all operands given have types. Types can also be
2301 specified directly, e.g.:
2302 vadd d0.s32, d1.s32, d2.s32 */
2305 create_neon_reg_alias (char *newname
, char *p
)
2307 enum arm_reg_type basetype
;
2308 struct reg_entry
*basereg
;
2309 struct reg_entry mybasereg
;
2310 struct neon_type ntype
;
2311 struct neon_typed_alias typeinfo
;
2312 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2315 typeinfo
.defined
= 0;
2316 typeinfo
.eltype
.type
= NT_invtype
;
2317 typeinfo
.eltype
.size
= -1;
2318 typeinfo
.index
= -1;
2322 if (strncmp (p
, " .dn ", 5) == 0)
2323 basetype
= REG_TYPE_VFD
;
2324 else if (strncmp (p
, " .qn ", 5) == 0)
2325 basetype
= REG_TYPE_NQ
;
2334 basereg
= arm_reg_parse_multi (&p
);
2336 if (basereg
&& basereg
->type
!= basetype
)
2338 as_bad (_("bad type for register"));
2342 if (basereg
== NULL
)
2345 /* Try parsing as an integer. */
2346 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2347 if (exp
.X_op
!= O_constant
)
2349 as_bad (_("expression must be constant"));
2352 basereg
= &mybasereg
;
2353 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2359 typeinfo
= *basereg
->neon
;
2361 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2363 /* We got a type. */
2364 if (typeinfo
.defined
& NTA_HASTYPE
)
2366 as_bad (_("can't redefine the type of a register alias"));
2370 typeinfo
.defined
|= NTA_HASTYPE
;
2371 if (ntype
.elems
!= 1)
2373 as_bad (_("you must specify a single type only"));
2376 typeinfo
.eltype
= ntype
.el
[0];
2379 if (skip_past_char (&p
, '[') == SUCCESS
)
2382 /* We got a scalar index. */
2384 if (typeinfo
.defined
& NTA_HASINDEX
)
2386 as_bad (_("can't redefine the index of a scalar alias"));
2390 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2392 if (exp
.X_op
!= O_constant
)
2394 as_bad (_("scalar index must be constant"));
2398 typeinfo
.defined
|= NTA_HASINDEX
;
2399 typeinfo
.index
= exp
.X_add_number
;
2401 if (skip_past_char (&p
, ']') == FAIL
)
2403 as_bad (_("expecting ]"));
2408 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2409 the desired alias name, and p points to its end. If not, then
2410 the desired alias name is in the global original_case_string. */
2411 #ifdef TC_CASE_SENSITIVE
2412 namelen
= nameend
- newname
;
2414 newname
= original_case_string
;
2415 namelen
= strlen (newname
);
2418 namebuf
= (char *) alloca (namelen
+ 1);
2419 strncpy (namebuf
, newname
, namelen
);
2420 namebuf
[namelen
] = '\0';
2422 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2423 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2425 /* Insert name in all uppercase. */
2426 for (p
= namebuf
; *p
; p
++)
2429 if (strncmp (namebuf
, newname
, namelen
))
2430 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2431 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2433 /* Insert name in all lowercase. */
2434 for (p
= namebuf
; *p
; p
++)
2437 if (strncmp (namebuf
, newname
, namelen
))
2438 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2439 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2444 /* Should never be called, as .req goes between the alias and the
2445 register name, not at the beginning of the line. */
2448 s_req (int a ATTRIBUTE_UNUSED
)
2450 as_bad (_("invalid syntax for .req directive"));
2454 s_dn (int a ATTRIBUTE_UNUSED
)
2456 as_bad (_("invalid syntax for .dn directive"));
2460 s_qn (int a ATTRIBUTE_UNUSED
)
2462 as_bad (_("invalid syntax for .qn directive"));
2465 /* The .unreq directive deletes an alias which was previously defined
2466 by .req. For example:
2472 s_unreq (int a ATTRIBUTE_UNUSED
)
2477 name
= input_line_pointer
;
2479 while (*input_line_pointer
!= 0
2480 && *input_line_pointer
!= ' '
2481 && *input_line_pointer
!= '\n')
2482 ++input_line_pointer
;
2484 saved_char
= *input_line_pointer
;
2485 *input_line_pointer
= 0;
2488 as_bad (_("invalid syntax for .unreq directive"));
2491 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2495 as_bad (_("unknown register alias '%s'"), name
);
2496 else if (reg
->builtin
)
2497 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2504 hash_delete (arm_reg_hsh
, name
, FALSE
);
2505 free ((char *) reg
->name
);
2510 /* Also locate the all upper case and all lower case versions.
2511 Do not complain if we cannot find one or the other as it
2512 was probably deleted above. */
2514 nbuf
= strdup (name
);
2515 for (p
= nbuf
; *p
; p
++)
2517 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2520 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2521 free ((char *) reg
->name
);
2527 for (p
= nbuf
; *p
; p
++)
2529 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2532 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2533 free ((char *) reg
->name
);
2543 *input_line_pointer
= saved_char
;
2544 demand_empty_rest_of_line ();
2547 /* Directives: Instruction set selection. */
2550 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2551 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2552 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2553 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2555 /* Create a new mapping symbol for the transition to STATE. */
2558 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2561 const char * symname
;
2568 type
= BSF_NO_FLAGS
;
2572 type
= BSF_NO_FLAGS
;
2576 type
= BSF_NO_FLAGS
;
2582 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2583 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2588 THUMB_SET_FUNC (symbolP
, 0);
2589 ARM_SET_THUMB (symbolP
, 0);
2590 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2594 THUMB_SET_FUNC (symbolP
, 1);
2595 ARM_SET_THUMB (symbolP
, 1);
2596 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2604 /* Save the mapping symbols for future reference. Also check that
2605 we do not place two mapping symbols at the same offset within a
2606 frag. We'll handle overlap between frags in
2607 check_mapping_symbols.
2609 If .fill or other data filling directive generates zero sized data,
2610 the mapping symbol for the following code will have the same value
2611 as the one generated for the data filling directive. In this case,
2612 we replace the old symbol with the new one at the same address. */
2615 if (frag
->tc_frag_data
.first_map
!= NULL
)
2617 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2618 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2620 frag
->tc_frag_data
.first_map
= symbolP
;
2622 if (frag
->tc_frag_data
.last_map
!= NULL
)
2624 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2625 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2626 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2628 frag
->tc_frag_data
.last_map
= symbolP
;
2631 /* We must sometimes convert a region marked as code to data during
2632 code alignment, if an odd number of bytes have to be padded. The
2633 code mapping symbol is pushed to an aligned address. */
2636 insert_data_mapping_symbol (enum mstate state
,
2637 valueT value
, fragS
*frag
, offsetT bytes
)
2639 /* If there was already a mapping symbol, remove it. */
2640 if (frag
->tc_frag_data
.last_map
!= NULL
2641 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2643 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2647 know (frag
->tc_frag_data
.first_map
== symp
);
2648 frag
->tc_frag_data
.first_map
= NULL
;
2650 frag
->tc_frag_data
.last_map
= NULL
;
2651 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2654 make_mapping_symbol (MAP_DATA
, value
, frag
);
2655 make_mapping_symbol (state
, value
+ bytes
, frag
);
2658 static void mapping_state_2 (enum mstate state
, int max_chars
);
2660 /* Set the mapping state to STATE. Only call this when about to
2661 emit some STATE bytes to the file. */
2663 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2665 mapping_state (enum mstate state
)
2667 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2669 if (mapstate
== state
)
2670 /* The mapping symbol has already been emitted.
2671 There is nothing else to do. */
2674 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2676 All ARM instructions require 4-byte alignment.
2677 (Almost) all Thumb instructions require 2-byte alignment.
2679 When emitting instructions into any section, mark the section
2682 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2683 but themselves require 2-byte alignment; this applies to some
2684 PC- relative forms. However, these cases will invovle implicit
2685 literal pool generation or an explicit .align >=2, both of
2686 which will cause the section to me marked with sufficient
2687 alignment. Thus, we don't handle those cases here. */
2688 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2690 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2691 /* This case will be evaluated later. */
2694 mapping_state_2 (state
, 0);
2697 /* Same as mapping_state, but MAX_CHARS bytes have already been
2698 allocated. Put the mapping symbol that far back. */
2701 mapping_state_2 (enum mstate state
, int max_chars
)
2703 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2705 if (!SEG_NORMAL (now_seg
))
2708 if (mapstate
== state
)
2709 /* The mapping symbol has already been emitted.
2710 There is nothing else to do. */
2713 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2714 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2716 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2717 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2720 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2723 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2724 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2728 #define mapping_state(x) ((void)0)
2729 #define mapping_state_2(x, y) ((void)0)
2732 /* Find the real, Thumb encoded start of a Thumb function. */
2736 find_real_start (symbolS
* symbolP
)
2739 const char * name
= S_GET_NAME (symbolP
);
2740 symbolS
* new_target
;
2742 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2743 #define STUB_NAME ".real_start_of"
2748 /* The compiler may generate BL instructions to local labels because
2749 it needs to perform a branch to a far away location. These labels
2750 do not have a corresponding ".real_start_of" label. We check
2751 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2752 the ".real_start_of" convention for nonlocal branches. */
2753 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2756 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2757 new_target
= symbol_find (real_start
);
2759 if (new_target
== NULL
)
2761 as_warn (_("Failed to find real start of function: %s\n"), name
);
2762 new_target
= symbolP
;
2770 opcode_select (int width
)
2777 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2778 as_bad (_("selected processor does not support THUMB opcodes"));
2781 /* No need to force the alignment, since we will have been
2782 coming from ARM mode, which is word-aligned. */
2783 record_alignment (now_seg
, 1);
2790 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2791 as_bad (_("selected processor does not support ARM opcodes"));
2796 frag_align (2, 0, 0);
2798 record_alignment (now_seg
, 1);
2803 as_bad (_("invalid instruction size selected (%d)"), width
);
2808 s_arm (int ignore ATTRIBUTE_UNUSED
)
2811 demand_empty_rest_of_line ();
2815 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2818 demand_empty_rest_of_line ();
2822 s_code (int unused ATTRIBUTE_UNUSED
)
2826 temp
= get_absolute_expression ();
2831 opcode_select (temp
);
2835 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2840 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2842 /* If we are not already in thumb mode go into it, EVEN if
2843 the target processor does not support thumb instructions.
2844 This is used by gcc/config/arm/lib1funcs.asm for example
2845 to compile interworking support functions even if the
2846 target processor should not support interworking. */
2850 record_alignment (now_seg
, 1);
2853 demand_empty_rest_of_line ();
2857 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2861 /* The following label is the name/address of the start of a Thumb function.
2862 We need to know this for the interworking support. */
2863 label_is_thumb_function_name
= TRUE
;
2866 /* Perform a .set directive, but also mark the alias as
2867 being a thumb function. */
2870 s_thumb_set (int equiv
)
2872 /* XXX the following is a duplicate of the code for s_set() in read.c
2873 We cannot just call that code as we need to get at the symbol that
2880 /* Especial apologies for the random logic:
2881 This just grew, and could be parsed much more simply!
2883 delim
= get_symbol_name (& name
);
2884 end_name
= input_line_pointer
;
2885 (void) restore_line_pointer (delim
);
2887 if (*input_line_pointer
!= ',')
2890 as_bad (_("expected comma after name \"%s\""), name
);
2892 ignore_rest_of_line ();
2896 input_line_pointer
++;
2899 if (name
[0] == '.' && name
[1] == '\0')
2901 /* XXX - this should not happen to .thumb_set. */
2905 if ((symbolP
= symbol_find (name
)) == NULL
2906 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2909 /* When doing symbol listings, play games with dummy fragments living
2910 outside the normal fragment chain to record the file and line info
2912 if (listing
& LISTING_SYMBOLS
)
2914 extern struct list_info_struct
* listing_tail
;
2915 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2917 memset (dummy_frag
, 0, sizeof (fragS
));
2918 dummy_frag
->fr_type
= rs_fill
;
2919 dummy_frag
->line
= listing_tail
;
2920 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2921 dummy_frag
->fr_symbol
= symbolP
;
2925 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2928 /* "set" symbols are local unless otherwise specified. */
2929 SF_SET_LOCAL (symbolP
);
2930 #endif /* OBJ_COFF */
2931 } /* Make a new symbol. */
2933 symbol_table_insert (symbolP
);
2938 && S_IS_DEFINED (symbolP
)
2939 && S_GET_SEGMENT (symbolP
) != reg_section
)
2940 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2942 pseudo_set (symbolP
);
2944 demand_empty_rest_of_line ();
2946 /* XXX Now we come to the Thumb specific bit of code. */
2948 THUMB_SET_FUNC (symbolP
, 1);
2949 ARM_SET_THUMB (symbolP
, 1);
2950 #if defined OBJ_ELF || defined OBJ_COFF
2951 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2955 /* Directives: Mode selection. */
2957 /* .syntax [unified|divided] - choose the new unified syntax
2958 (same for Arm and Thumb encoding, modulo slight differences in what
2959 can be represented) or the old divergent syntax for each mode. */
2961 s_syntax (int unused ATTRIBUTE_UNUSED
)
2965 delim
= get_symbol_name (& name
);
2967 if (!strcasecmp (name
, "unified"))
2968 unified_syntax
= TRUE
;
2969 else if (!strcasecmp (name
, "divided"))
2970 unified_syntax
= FALSE
;
2973 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2976 (void) restore_line_pointer (delim
);
2977 demand_empty_rest_of_line ();
2980 /* Directives: sectioning and alignment. */
2983 s_bss (int ignore ATTRIBUTE_UNUSED
)
2985 /* We don't support putting frags in the BSS segment, we fake it by
2986 marking in_bss, then looking at s_skip for clues. */
2987 subseg_set (bss_section
, 0);
2988 demand_empty_rest_of_line ();
2990 #ifdef md_elf_section_change_hook
2991 md_elf_section_change_hook ();
2996 s_even (int ignore ATTRIBUTE_UNUSED
)
2998 /* Never make frag if expect extra pass. */
3000 frag_align (1, 0, 0);
3002 record_alignment (now_seg
, 1);
3004 demand_empty_rest_of_line ();
3007 /* Directives: CodeComposer Studio. */
3009 /* .ref (for CodeComposer Studio syntax only). */
3011 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3013 if (codecomposer_syntax
)
3014 ignore_rest_of_line ();
3016 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3019 /* If name is not NULL, then it is used for marking the beginning of a
3020 function, wherease if it is NULL then it means the function end. */
3022 asmfunc_debug (const char * name
)
3024 static const char * last_name
= NULL
;
3028 gas_assert (last_name
== NULL
);
3031 if (debug_type
== DEBUG_STABS
)
3032 stabs_generate_asm_func (name
, name
);
3036 gas_assert (last_name
!= NULL
);
3038 if (debug_type
== DEBUG_STABS
)
3039 stabs_generate_asm_endfunc (last_name
, last_name
);
3046 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3048 if (codecomposer_syntax
)
3050 switch (asmfunc_state
)
3052 case OUTSIDE_ASMFUNC
:
3053 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3056 case WAITING_ASMFUNC_NAME
:
3057 as_bad (_(".asmfunc repeated."));
3060 case WAITING_ENDASMFUNC
:
3061 as_bad (_(".asmfunc without function."));
3064 demand_empty_rest_of_line ();
3067 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3071 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3073 if (codecomposer_syntax
)
3075 switch (asmfunc_state
)
3077 case OUTSIDE_ASMFUNC
:
3078 as_bad (_(".endasmfunc without a .asmfunc."));
3081 case WAITING_ASMFUNC_NAME
:
3082 as_bad (_(".endasmfunc without function."));
3085 case WAITING_ENDASMFUNC
:
3086 asmfunc_state
= OUTSIDE_ASMFUNC
;
3087 asmfunc_debug (NULL
);
3090 demand_empty_rest_of_line ();
3093 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3097 s_ccs_def (int name
)
3099 if (codecomposer_syntax
)
3102 as_bad (_(".def pseudo-op only available with -mccs flag."));
3105 /* Directives: Literal pools. */
3107 static literal_pool
*
3108 find_literal_pool (void)
3110 literal_pool
* pool
;
3112 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3114 if (pool
->section
== now_seg
3115 && pool
->sub_section
== now_subseg
)
3122 static literal_pool
*
3123 find_or_make_literal_pool (void)
3125 /* Next literal pool ID number. */
3126 static unsigned int latest_pool_num
= 1;
3127 literal_pool
* pool
;
3129 pool
= find_literal_pool ();
3133 /* Create a new pool. */
3134 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
3138 pool
->next_free_entry
= 0;
3139 pool
->section
= now_seg
;
3140 pool
->sub_section
= now_subseg
;
3141 pool
->next
= list_of_pools
;
3142 pool
->symbol
= NULL
;
3143 pool
->alignment
= 2;
3145 /* Add it to the list. */
3146 list_of_pools
= pool
;
3149 /* New pools, and emptied pools, will have a NULL symbol. */
3150 if (pool
->symbol
== NULL
)
3152 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3153 (valueT
) 0, &zero_address_frag
);
3154 pool
->id
= latest_pool_num
++;
3161 /* Add the literal in the global 'inst'
3162 structure to the relevant literal pool. */
3165 add_to_lit_pool (unsigned int nbytes
)
3167 #define PADDING_SLOT 0x1
3168 #define LIT_ENTRY_SIZE_MASK 0xFF
3169 literal_pool
* pool
;
3170 unsigned int entry
, pool_size
= 0;
3171 bfd_boolean padding_slot_p
= FALSE
;
3177 imm1
= inst
.operands
[1].imm
;
3178 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3179 : inst
.reloc
.exp
.X_unsigned
? 0
3180 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3181 if (target_big_endian
)
3184 imm2
= inst
.operands
[1].imm
;
3188 pool
= find_or_make_literal_pool ();
3190 /* Check if this literal value is already in the pool. */
3191 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3195 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3196 && (inst
.reloc
.exp
.X_op
== O_constant
)
3197 && (pool
->literals
[entry
].X_add_number
3198 == inst
.reloc
.exp
.X_add_number
)
3199 && (pool
->literals
[entry
].X_md
== nbytes
)
3200 && (pool
->literals
[entry
].X_unsigned
3201 == inst
.reloc
.exp
.X_unsigned
))
3204 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3205 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3206 && (pool
->literals
[entry
].X_add_number
3207 == inst
.reloc
.exp
.X_add_number
)
3208 && (pool
->literals
[entry
].X_add_symbol
3209 == inst
.reloc
.exp
.X_add_symbol
)
3210 && (pool
->literals
[entry
].X_op_symbol
3211 == inst
.reloc
.exp
.X_op_symbol
)
3212 && (pool
->literals
[entry
].X_md
== nbytes
))
3215 else if ((nbytes
== 8)
3216 && !(pool_size
& 0x7)
3217 && ((entry
+ 1) != pool
->next_free_entry
)
3218 && (pool
->literals
[entry
].X_op
== O_constant
)
3219 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3220 && (pool
->literals
[entry
].X_unsigned
3221 == inst
.reloc
.exp
.X_unsigned
)
3222 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3223 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3224 && (pool
->literals
[entry
+ 1].X_unsigned
3225 == inst
.reloc
.exp
.X_unsigned
))
3228 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3229 if (padding_slot_p
&& (nbytes
== 4))
3235 /* Do we need to create a new entry? */
3236 if (entry
== pool
->next_free_entry
)
3238 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3240 inst
.error
= _("literal pool overflow");
3246 /* For 8-byte entries, we align to an 8-byte boundary,
3247 and split it into two 4-byte entries, because on 32-bit
3248 host, 8-byte constants are treated as big num, thus
3249 saved in "generic_bignum" which will be overwritten
3250 by later assignments.
3252 We also need to make sure there is enough space for
3255 We also check to make sure the literal operand is a
3257 if (!(inst
.reloc
.exp
.X_op
== O_constant
3258 || inst
.reloc
.exp
.X_op
== O_big
))
3260 inst
.error
= _("invalid type for literal pool");
3263 else if (pool_size
& 0x7)
3265 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3267 inst
.error
= _("literal pool overflow");
3271 pool
->literals
[entry
] = inst
.reloc
.exp
;
3272 pool
->literals
[entry
].X_add_number
= 0;
3273 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3274 pool
->next_free_entry
+= 1;
3277 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3279 inst
.error
= _("literal pool overflow");
3283 pool
->literals
[entry
] = inst
.reloc
.exp
;
3284 pool
->literals
[entry
].X_op
= O_constant
;
3285 pool
->literals
[entry
].X_add_number
= imm1
;
3286 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3287 pool
->literals
[entry
++].X_md
= 4;
3288 pool
->literals
[entry
] = inst
.reloc
.exp
;
3289 pool
->literals
[entry
].X_op
= O_constant
;
3290 pool
->literals
[entry
].X_add_number
= imm2
;
3291 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3292 pool
->literals
[entry
].X_md
= 4;
3293 pool
->alignment
= 3;
3294 pool
->next_free_entry
+= 1;
3298 pool
->literals
[entry
] = inst
.reloc
.exp
;
3299 pool
->literals
[entry
].X_md
= 4;
3303 /* PR ld/12974: Record the location of the first source line to reference
3304 this entry in the literal pool. If it turns out during linking that the
3305 symbol does not exist we will be able to give an accurate line number for
3306 the (first use of the) missing reference. */
3307 if (debug_type
== DEBUG_DWARF2
)
3308 dwarf2_where (pool
->locs
+ entry
);
3310 pool
->next_free_entry
+= 1;
3312 else if (padding_slot_p
)
3314 pool
->literals
[entry
] = inst
.reloc
.exp
;
3315 pool
->literals
[entry
].X_md
= nbytes
;
3318 inst
.reloc
.exp
.X_op
= O_symbol
;
3319 inst
.reloc
.exp
.X_add_number
= pool_size
;
3320 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3326 tc_start_label_without_colon (void)
3328 bfd_boolean ret
= TRUE
;
3330 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3332 const char *label
= input_line_pointer
;
3334 while (!is_end_of_line
[(int) label
[-1]])
3339 as_bad (_("Invalid label '%s'"), label
);
3343 asmfunc_debug (label
);
3345 asmfunc_state
= WAITING_ENDASMFUNC
;
3351 /* Can't use symbol_new here, so have to create a symbol and then at
3352 a later date assign it a value. Thats what these functions do. */
3355 symbol_locate (symbolS
* symbolP
,
3356 const char * name
, /* It is copied, the caller can modify. */
3357 segT segment
, /* Segment identifier (SEG_<something>). */
3358 valueT valu
, /* Symbol value. */
3359 fragS
* frag
) /* Associated fragment. */
3362 char * preserved_copy_of_name
;
3364 name_length
= strlen (name
) + 1; /* +1 for \0. */
3365 obstack_grow (¬es
, name
, name_length
);
3366 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3368 #ifdef tc_canonicalize_symbol_name
3369 preserved_copy_of_name
=
3370 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3373 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3375 S_SET_SEGMENT (symbolP
, segment
);
3376 S_SET_VALUE (symbolP
, valu
);
3377 symbol_clear_list_pointers (symbolP
);
3379 symbol_set_frag (symbolP
, frag
);
3381 /* Link to end of symbol chain. */
3383 extern int symbol_table_frozen
;
3385 if (symbol_table_frozen
)
3389 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3391 obj_symbol_new_hook (symbolP
);
3393 #ifdef tc_symbol_new_hook
3394 tc_symbol_new_hook (symbolP
);
3398 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3399 #endif /* DEBUG_SYMS */
3403 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3406 literal_pool
* pool
;
3409 pool
= find_literal_pool ();
3411 || pool
->symbol
== NULL
3412 || pool
->next_free_entry
== 0)
3415 /* Align pool as you have word accesses.
3416 Only make a frag if we have to. */
3418 frag_align (pool
->alignment
, 0, 0);
3420 record_alignment (now_seg
, 2);
3423 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3424 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3426 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3428 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3429 (valueT
) frag_now_fix (), frag_now
);
3430 symbol_table_insert (pool
->symbol
);
3432 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3434 #if defined OBJ_COFF || defined OBJ_ELF
3435 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3438 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3441 if (debug_type
== DEBUG_DWARF2
)
3442 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3444 /* First output the expression in the instruction to the pool. */
3445 emit_expr (&(pool
->literals
[entry
]),
3446 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3449 /* Mark the pool as empty. */
3450 pool
->next_free_entry
= 0;
3451 pool
->symbol
= NULL
;
3455 /* Forward declarations for functions below, in the MD interface
3457 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3458 static valueT
create_unwind_entry (int);
3459 static void start_unwind_section (const segT
, int);
3460 static void add_unwind_opcode (valueT
, int);
3461 static void flush_pending_unwind (void);
3463 /* Directives: Data. */
3466 s_arm_elf_cons (int nbytes
)
3470 #ifdef md_flush_pending_output
3471 md_flush_pending_output ();
3474 if (is_it_end_of_statement ())
3476 demand_empty_rest_of_line ();
3480 #ifdef md_cons_align
3481 md_cons_align (nbytes
);
3484 mapping_state (MAP_DATA
);
3488 char *base
= input_line_pointer
;
3492 if (exp
.X_op
!= O_symbol
)
3493 emit_expr (&exp
, (unsigned int) nbytes
);
3496 char *before_reloc
= input_line_pointer
;
3497 reloc
= parse_reloc (&input_line_pointer
);
3500 as_bad (_("unrecognized relocation suffix"));
3501 ignore_rest_of_line ();
3504 else if (reloc
== BFD_RELOC_UNUSED
)
3505 emit_expr (&exp
, (unsigned int) nbytes
);
3508 reloc_howto_type
*howto
= (reloc_howto_type
*)
3509 bfd_reloc_type_lookup (stdoutput
,
3510 (bfd_reloc_code_real_type
) reloc
);
3511 int size
= bfd_get_reloc_size (howto
);
3513 if (reloc
== BFD_RELOC_ARM_PLT32
)
3515 as_bad (_("(plt) is only valid on branch targets"));
3516 reloc
= BFD_RELOC_UNUSED
;
3521 as_bad (_("%s relocations do not fit in %d bytes"),
3522 howto
->name
, nbytes
);
3525 /* We've parsed an expression stopping at O_symbol.
3526 But there may be more expression left now that we
3527 have parsed the relocation marker. Parse it again.
3528 XXX Surely there is a cleaner way to do this. */
3529 char *p
= input_line_pointer
;
3531 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3532 memcpy (save_buf
, base
, input_line_pointer
- base
);
3533 memmove (base
+ (input_line_pointer
- before_reloc
),
3534 base
, before_reloc
- base
);
3536 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3538 memcpy (base
, save_buf
, p
- base
);
3540 offset
= nbytes
- size
;
3541 p
= frag_more (nbytes
);
3542 memset (p
, 0, nbytes
);
3543 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3544 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3549 while (*input_line_pointer
++ == ',');
3551 /* Put terminator back into stream. */
3552 input_line_pointer
--;
3553 demand_empty_rest_of_line ();
3556 /* Emit an expression containing a 32-bit thumb instruction.
3557 Implementation based on put_thumb32_insn. */
3560 emit_thumb32_expr (expressionS
* exp
)
3562 expressionS exp_high
= *exp
;
3564 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3565 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3566 exp
->X_add_number
&= 0xffff;
3567 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit Thumb opcode, 4 for a 32-bit Thumb-2 opcode,
   and 0 when the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3584 emit_insn (expressionS
*exp
, int nbytes
)
3588 if (exp
->X_op
== O_constant
)
3593 size
= thumb_insn_size (exp
->X_add_number
);
3597 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3599 as_bad (_(".inst.n operand too big. "\
3600 "Use .inst.w instead"));
3605 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3606 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3608 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3610 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3611 emit_thumb32_expr (exp
);
3613 emit_expr (exp
, (unsigned int) size
);
3615 it_fsm_post_encode ();
3619 as_bad (_("cannot determine Thumb instruction size. " \
3620 "Use .inst.n/.inst.w instead"));
3623 as_bad (_("constant expression required"));
3628 /* Like s_arm_elf_cons but do not use md_cons_align and
3629 set the mapping state to MAP_ARM/MAP_THUMB. */
3632 s_arm_elf_inst (int nbytes
)
3634 if (is_it_end_of_statement ())
3636 demand_empty_rest_of_line ();
3640 /* Calling mapping_state () here will not change ARM/THUMB,
3641 but will ensure not to be in DATA state. */
3644 mapping_state (MAP_THUMB
);
3649 as_bad (_("width suffixes are invalid in ARM mode"));
3650 ignore_rest_of_line ();
3656 mapping_state (MAP_ARM
);
3665 if (! emit_insn (& exp
, nbytes
))
3667 ignore_rest_of_line ();
3671 while (*input_line_pointer
++ == ',');
3673 /* Put terminator back into stream. */
3674 input_line_pointer
--;
3675 demand_empty_rest_of_line ();
3678 /* Parse a .rel31 directive. */
3681 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3688 if (*input_line_pointer
== '1')
3689 highbit
= 0x80000000;
3690 else if (*input_line_pointer
!= '0')
3691 as_bad (_("expected 0 or 1"));
3693 input_line_pointer
++;
3694 if (*input_line_pointer
!= ',')
3695 as_bad (_("missing comma"));
3696 input_line_pointer
++;
3698 #ifdef md_flush_pending_output
3699 md_flush_pending_output ();
3702 #ifdef md_cons_align
3706 mapping_state (MAP_DATA
);
3711 md_number_to_chars (p
, highbit
, 4);
3712 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3713 BFD_RELOC_ARM_PREL31
);
3715 demand_empty_rest_of_line ();
3718 /* Directives: AEABI stack-unwind tables. */
3720 /* Parse an unwind_fnstart directive. Simply records the current location. */
3723 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3725 demand_empty_rest_of_line ();
3726 if (unwind
.proc_start
)
3728 as_bad (_("duplicate .fnstart directive"));
3732 /* Mark the start of the function. */
3733 unwind
.proc_start
= expr_build_dot ();
3735 /* Reset the rest of the unwind info. */
3736 unwind
.opcode_count
= 0;
3737 unwind
.table_entry
= NULL
;
3738 unwind
.personality_routine
= NULL
;
3739 unwind
.personality_index
= -1;
3740 unwind
.frame_size
= 0;
3741 unwind
.fp_offset
= 0;
3742 unwind
.fp_reg
= REG_SP
;
3744 unwind
.sp_restored
= 0;
3748 /* Parse a handlerdata directive. Creates the exception handling table entry
3749 for the function. */
3752 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3754 demand_empty_rest_of_line ();
3755 if (!unwind
.proc_start
)
3756 as_bad (MISSING_FNSTART
);
3758 if (unwind
.table_entry
)
3759 as_bad (_("duplicate .handlerdata directive"));
3761 create_unwind_entry (1);
3764 /* Parse an unwind_fnend directive. Generates the index table entry. */
3767 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3772 unsigned int marked_pr_dependency
;
3774 demand_empty_rest_of_line ();
3776 if (!unwind
.proc_start
)
3778 as_bad (_(".fnend directive without .fnstart"));
3782 /* Add eh table entry. */
3783 if (unwind
.table_entry
== NULL
)
3784 val
= create_unwind_entry (0);
3788 /* Add index table entry. This is two words. */
3789 start_unwind_section (unwind
.saved_seg
, 1);
3790 frag_align (2, 0, 0);
3791 record_alignment (now_seg
, 2);
3793 ptr
= frag_more (8);
3795 where
= frag_now_fix () - 8;
3797 /* Self relative offset of the function start. */
3798 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3799 BFD_RELOC_ARM_PREL31
);
3801 /* Indicate dependency on EHABI-defined personality routines to the
3802 linker, if it hasn't been done already. */
3803 marked_pr_dependency
3804 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3805 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3806 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3808 static const char *const name
[] =
3810 "__aeabi_unwind_cpp_pr0",
3811 "__aeabi_unwind_cpp_pr1",
3812 "__aeabi_unwind_cpp_pr2"
3814 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3815 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3816 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3817 |= 1 << unwind
.personality_index
;
3821 /* Inline exception table entry. */
3822 md_number_to_chars (ptr
+ 4, val
, 4);
3824 /* Self relative offset of the table entry. */
3825 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3826 BFD_RELOC_ARM_PREL31
);
3828 /* Restore the original section. */
3829 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3831 unwind
.proc_start
= NULL
;
3835 /* Parse an unwind_cantunwind directive. */
3838 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3840 demand_empty_rest_of_line ();
3841 if (!unwind
.proc_start
)
3842 as_bad (MISSING_FNSTART
);
3844 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3845 as_bad (_("personality routine specified for cantunwind frame"));
3847 unwind
.personality_index
= -2;
3851 /* Parse a personalityindex directive. */
3854 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3858 if (!unwind
.proc_start
)
3859 as_bad (MISSING_FNSTART
);
3861 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3862 as_bad (_("duplicate .personalityindex directive"));
3866 if (exp
.X_op
!= O_constant
3867 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3869 as_bad (_("bad personality routine number"));
3870 ignore_rest_of_line ();
3874 unwind
.personality_index
= exp
.X_add_number
;
3876 demand_empty_rest_of_line ();
3880 /* Parse a personality directive. */
3883 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3887 if (!unwind
.proc_start
)
3888 as_bad (MISSING_FNSTART
);
3890 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3891 as_bad (_("duplicate .personality directive"));
3893 c
= get_symbol_name (& name
);
3894 p
= input_line_pointer
;
3896 ++ input_line_pointer
;
3897 unwind
.personality_routine
= symbol_find_or_make (name
);
3899 demand_empty_rest_of_line ();
3903 /* Parse a directive saving core registers. */
3906 s_arm_unwind_save_core (void)
3912 range
= parse_reg_list (&input_line_pointer
);
3915 as_bad (_("expected register list"));
3916 ignore_rest_of_line ();
3920 demand_empty_rest_of_line ();
3922 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3923 into .unwind_save {..., sp...}. We aren't bothered about the value of
3924 ip because it is clobbered by calls. */
3925 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3926 && (range
& 0x3000) == 0x1000)
3928 unwind
.opcode_count
--;
3929 unwind
.sp_restored
= 0;
3930 range
= (range
| 0x2000) & ~0x1000;
3931 unwind
.pending_offset
= 0;
3937 /* See if we can use the short opcodes. These pop a block of up to 8
3938 registers starting with r4, plus maybe r14. */
3939 for (n
= 0; n
< 8; n
++)
3941 /* Break at the first non-saved register. */
3942 if ((range
& (1 << (n
+ 4))) == 0)
3945 /* See if there are any other bits set. */
3946 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3948 /* Use the long form. */
3949 op
= 0x8000 | ((range
>> 4) & 0xfff);
3950 add_unwind_opcode (op
, 2);
3954 /* Use the short form. */
3956 op
= 0xa8; /* Pop r14. */
3958 op
= 0xa0; /* Do not pop r14. */
3960 add_unwind_opcode (op
, 1);
3967 op
= 0xb100 | (range
& 0xf);
3968 add_unwind_opcode (op
, 2);
3971 /* Record the number of bytes pushed. */
3972 for (n
= 0; n
< 16; n
++)
3974 if (range
& (1 << n
))
3975 unwind
.frame_size
+= 4;
3980 /* Parse a directive saving FPA registers. */
3983 s_arm_unwind_save_fpa (int reg
)
3989 /* Get Number of registers to transfer. */
3990 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3993 exp
.X_op
= O_illegal
;
3995 if (exp
.X_op
!= O_constant
)
3997 as_bad (_("expected , <constant>"));
3998 ignore_rest_of_line ();
4002 num_regs
= exp
.X_add_number
;
4004 if (num_regs
< 1 || num_regs
> 4)
4006 as_bad (_("number of registers must be in the range [1:4]"));
4007 ignore_rest_of_line ();
4011 demand_empty_rest_of_line ();
4016 op
= 0xb4 | (num_regs
- 1);
4017 add_unwind_opcode (op
, 1);
4022 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4023 add_unwind_opcode (op
, 2);
4025 unwind
.frame_size
+= num_regs
* 12;
4029 /* Parse a directive saving VFP registers for ARMv6 and above. */
4032 s_arm_unwind_save_vfp_armv6 (void)
4037 int num_vfpv3_regs
= 0;
4038 int num_regs_below_16
;
4040 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4043 as_bad (_("expected register list"));
4044 ignore_rest_of_line ();
4048 demand_empty_rest_of_line ();
4050 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4051 than FSTMX/FLDMX-style ones). */
4053 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4055 num_vfpv3_regs
= count
;
4056 else if (start
+ count
> 16)
4057 num_vfpv3_regs
= start
+ count
- 16;
4059 if (num_vfpv3_regs
> 0)
4061 int start_offset
= start
> 16 ? start
- 16 : 0;
4062 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4063 add_unwind_opcode (op
, 2);
4066 /* Generate opcode for registers numbered in the range 0 .. 15. */
4067 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4068 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4069 if (num_regs_below_16
> 0)
4071 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4072 add_unwind_opcode (op
, 2);
4075 unwind
.frame_size
+= count
* 8;
4079 /* Parse a directive saving VFP registers for pre-ARMv6. */
4082 s_arm_unwind_save_vfp (void)
4088 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4091 as_bad (_("expected register list"));
4092 ignore_rest_of_line ();
4096 demand_empty_rest_of_line ();
4101 op
= 0xb8 | (count
- 1);
4102 add_unwind_opcode (op
, 1);
4107 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4108 add_unwind_opcode (op
, 2);
4110 unwind
.frame_size
+= count
* 8 + 4;
4114 /* Parse a directive saving iWMMXt data registers. */
4117 s_arm_unwind_save_mmxwr (void)
4125 if (*input_line_pointer
== '{')
4126 input_line_pointer
++;
4130 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4134 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4139 as_tsktsk (_("register list not in ascending order"));
4142 if (*input_line_pointer
== '-')
4144 input_line_pointer
++;
4145 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4148 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4151 else if (reg
>= hi_reg
)
4153 as_bad (_("bad register range"));
4156 for (; reg
< hi_reg
; reg
++)
4160 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4162 skip_past_char (&input_line_pointer
, '}');
4164 demand_empty_rest_of_line ();
4166 /* Generate any deferred opcodes because we're going to be looking at
4168 flush_pending_unwind ();
4170 for (i
= 0; i
< 16; i
++)
4172 if (mask
& (1 << i
))
4173 unwind
.frame_size
+= 8;
4176 /* Attempt to combine with a previous opcode. We do this because gcc
4177 likes to output separate unwind directives for a single block of
4179 if (unwind
.opcode_count
> 0)
4181 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4182 if ((i
& 0xf8) == 0xc0)
4185 /* Only merge if the blocks are contiguous. */
4188 if ((mask
& 0xfe00) == (1 << 9))
4190 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4191 unwind
.opcode_count
--;
4194 else if (i
== 6 && unwind
.opcode_count
>= 2)
4196 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4200 op
= 0xffff << (reg
- 1);
4202 && ((mask
& op
) == (1u << (reg
- 1))))
4204 op
= (1 << (reg
+ i
+ 1)) - 1;
4205 op
&= ~((1 << reg
) - 1);
4207 unwind
.opcode_count
-= 2;
4214 /* We want to generate opcodes in the order the registers have been
4215 saved, ie. descending order. */
4216 for (reg
= 15; reg
>= -1; reg
--)
4218 /* Save registers in blocks. */
4220 || !(mask
& (1 << reg
)))
4222 /* We found an unsaved reg. Generate opcodes to save the
4229 op
= 0xc0 | (hi_reg
- 10);
4230 add_unwind_opcode (op
, 1);
4235 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4236 add_unwind_opcode (op
, 2);
4245 ignore_rest_of_line ();
4249 s_arm_unwind_save_mmxwcg (void)
4256 if (*input_line_pointer
== '{')
4257 input_line_pointer
++;
4259 skip_whitespace (input_line_pointer
);
4263 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4267 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4273 as_tsktsk (_("register list not in ascending order"));
4276 if (*input_line_pointer
== '-')
4278 input_line_pointer
++;
4279 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4282 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4285 else if (reg
>= hi_reg
)
4287 as_bad (_("bad register range"));
4290 for (; reg
< hi_reg
; reg
++)
4294 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4296 skip_past_char (&input_line_pointer
, '}');
4298 demand_empty_rest_of_line ();
4300 /* Generate any deferred opcodes because we're going to be looking at
4302 flush_pending_unwind ();
4304 for (reg
= 0; reg
< 16; reg
++)
4306 if (mask
& (1 << reg
))
4307 unwind
.frame_size
+= 4;
4310 add_unwind_opcode (op
, 2);
4313 ignore_rest_of_line ();
4317 /* Parse an unwind_save directive.
4318 If the argument is non-zero, this is a .vsave directive. */
4321 s_arm_unwind_save (int arch_v6
)
4324 struct reg_entry
*reg
;
4325 bfd_boolean had_brace
= FALSE
;
4327 if (!unwind
.proc_start
)
4328 as_bad (MISSING_FNSTART
);
4330 /* Figure out what sort of save we have. */
4331 peek
= input_line_pointer
;
4339 reg
= arm_reg_parse_multi (&peek
);
4343 as_bad (_("register expected"));
4344 ignore_rest_of_line ();
4353 as_bad (_("FPA .unwind_save does not take a register list"));
4354 ignore_rest_of_line ();
4357 input_line_pointer
= peek
;
4358 s_arm_unwind_save_fpa (reg
->number
);
4362 s_arm_unwind_save_core ();
4367 s_arm_unwind_save_vfp_armv6 ();
4369 s_arm_unwind_save_vfp ();
4372 case REG_TYPE_MMXWR
:
4373 s_arm_unwind_save_mmxwr ();
4376 case REG_TYPE_MMXWCG
:
4377 s_arm_unwind_save_mmxwcg ();
4381 as_bad (_(".unwind_save does not support this kind of register"));
4382 ignore_rest_of_line ();
4387 /* Parse an unwind_movsp directive. */
4390 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4396 if (!unwind
.proc_start
)
4397 as_bad (MISSING_FNSTART
);
4399 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4402 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4403 ignore_rest_of_line ();
4407 /* Optional constant. */
4408 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4410 if (immediate_for_directive (&offset
) == FAIL
)
4416 demand_empty_rest_of_line ();
4418 if (reg
== REG_SP
|| reg
== REG_PC
)
4420 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4424 if (unwind
.fp_reg
!= REG_SP
)
4425 as_bad (_("unexpected .unwind_movsp directive"));
4427 /* Generate opcode to restore the value. */
4429 add_unwind_opcode (op
, 1);
4431 /* Record the information for later. */
4432 unwind
.fp_reg
= reg
;
4433 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4434 unwind
.sp_restored
= 1;
4437 /* Parse an unwind_pad directive. */
4440 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4444 if (!unwind
.proc_start
)
4445 as_bad (MISSING_FNSTART
);
4447 if (immediate_for_directive (&offset
) == FAIL
)
4452 as_bad (_("stack increment must be multiple of 4"));
4453 ignore_rest_of_line ();
4457 /* Don't generate any opcodes, just record the details for later. */
4458 unwind
.frame_size
+= offset
;
4459 unwind
.pending_offset
+= offset
;
4461 demand_empty_rest_of_line ();
4464 /* Parse an unwind_setfp directive. */
4467 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4473 if (!unwind
.proc_start
)
4474 as_bad (MISSING_FNSTART
);
4476 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4477 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4480 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4482 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4484 as_bad (_("expected <reg>, <reg>"));
4485 ignore_rest_of_line ();
4489 /* Optional constant. */
4490 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4492 if (immediate_for_directive (&offset
) == FAIL
)
4498 demand_empty_rest_of_line ();
4500 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4502 as_bad (_("register must be either sp or set by a previous"
4503 "unwind_movsp directive"));
4507 /* Don't generate any opcodes, just record the information for later. */
4508 unwind
.fp_reg
= fp_reg
;
4510 if (sp_reg
== REG_SP
)
4511 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4513 unwind
.fp_offset
-= offset
;
4516 /* Parse an unwind_raw directive. */
4519 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4522 /* This is an arbitrary limit. */
4523 unsigned char op
[16];
4526 if (!unwind
.proc_start
)
4527 as_bad (MISSING_FNSTART
);
4530 if (exp
.X_op
== O_constant
4531 && skip_past_comma (&input_line_pointer
) != FAIL
)
4533 unwind
.frame_size
+= exp
.X_add_number
;
4537 exp
.X_op
= O_illegal
;
4539 if (exp
.X_op
!= O_constant
)
4541 as_bad (_("expected <offset>, <opcode>"));
4542 ignore_rest_of_line ();
4548 /* Parse the opcode. */
4553 as_bad (_("unwind opcode too long"));
4554 ignore_rest_of_line ();
4556 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4558 as_bad (_("invalid unwind opcode"));
4559 ignore_rest_of_line ();
4562 op
[count
++] = exp
.X_add_number
;
4564 /* Parse the next byte. */
4565 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4571 /* Add the opcode bytes in reverse order. */
4573 add_unwind_opcode (op
[count
], 1);
4575 demand_empty_rest_of_line ();
4579 /* Parse a .eabi_attribute directive. */
4582 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4584 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4586 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4587 attributes_set_explicitly
[tag
] = 1;
4590 /* Emit a tls fix for the symbol. */
4593 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4597 #ifdef md_flush_pending_output
4598 md_flush_pending_output ();
4601 #ifdef md_cons_align
4605 /* Since we're just labelling the code, there's no need to define a
4608 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4609 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4610 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4611 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4613 #endif /* OBJ_ELF */
4615 static void s_arm_arch (int);
4616 static void s_arm_object_arch (int);
4617 static void s_arm_cpu (int);
4618 static void s_arm_fpu (int);
4619 static void s_arm_arch_extension (int);
4624 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4631 if (exp
.X_op
== O_symbol
)
4632 exp
.X_op
= O_secrel
;
4634 emit_expr (&exp
, 4);
4636 while (*input_line_pointer
++ == ',');
4638 input_line_pointer
--;
4639 demand_empty_rest_of_line ();
4643 /* This table describes all the machine specific pseudo-ops the assembler
4644 has to support. The fields are:
4645 pseudo-op name without dot
4646 function to call to execute this pseudo-op
4647 Integer arg to pass to the function. */
4649 const pseudo_typeS md_pseudo_table
[] =
4651 /* Never called because '.req' does not start a line. */
4652 { "req", s_req
, 0 },
4653 /* Following two are likewise never called. */
4656 { "unreq", s_unreq
, 0 },
4657 { "bss", s_bss
, 0 },
4658 { "align", s_align_ptwo
, 2 },
4659 { "arm", s_arm
, 0 },
4660 { "thumb", s_thumb
, 0 },
4661 { "code", s_code
, 0 },
4662 { "force_thumb", s_force_thumb
, 0 },
4663 { "thumb_func", s_thumb_func
, 0 },
4664 { "thumb_set", s_thumb_set
, 0 },
4665 { "even", s_even
, 0 },
4666 { "ltorg", s_ltorg
, 0 },
4667 { "pool", s_ltorg
, 0 },
4668 { "syntax", s_syntax
, 0 },
4669 { "cpu", s_arm_cpu
, 0 },
4670 { "arch", s_arm_arch
, 0 },
4671 { "object_arch", s_arm_object_arch
, 0 },
4672 { "fpu", s_arm_fpu
, 0 },
4673 { "arch_extension", s_arm_arch_extension
, 0 },
4675 { "word", s_arm_elf_cons
, 4 },
4676 { "long", s_arm_elf_cons
, 4 },
4677 { "inst.n", s_arm_elf_inst
, 2 },
4678 { "inst.w", s_arm_elf_inst
, 4 },
4679 { "inst", s_arm_elf_inst
, 0 },
4680 { "rel31", s_arm_rel31
, 0 },
4681 { "fnstart", s_arm_unwind_fnstart
, 0 },
4682 { "fnend", s_arm_unwind_fnend
, 0 },
4683 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4684 { "personality", s_arm_unwind_personality
, 0 },
4685 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4686 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4687 { "save", s_arm_unwind_save
, 0 },
4688 { "vsave", s_arm_unwind_save
, 1 },
4689 { "movsp", s_arm_unwind_movsp
, 0 },
4690 { "pad", s_arm_unwind_pad
, 0 },
4691 { "setfp", s_arm_unwind_setfp
, 0 },
4692 { "unwind_raw", s_arm_unwind_raw
, 0 },
4693 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4694 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4698 /* These are used for dwarf. */
4702 /* These are used for dwarf2. */
4703 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4704 { "loc", dwarf2_directive_loc
, 0 },
4705 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4707 { "extend", float_cons
, 'x' },
4708 { "ldouble", float_cons
, 'x' },
4709 { "packed", float_cons
, 'p' },
4711 {"secrel32", pe_directive_secrel
, 0},
4714 /* These are for compatibility with CodeComposer Studio. */
4715 {"ref", s_ccs_ref
, 0},
4716 {"def", s_ccs_def
, 0},
4717 {"asmfunc", s_ccs_asmfunc
, 0},
4718 {"endasmfunc", s_ccs_endasmfunc
, 0},
4723 /* Parser functions used exclusively in instruction operands. */
4725 /* Generic immediate-value read function for use in insn parsing.
4726 STR points to the beginning of the immediate (the leading #);
4727 VAL receives the value; if the value is outside [MIN, MAX]
4728 issue an error. PREFIX_OPT is true if the immediate prefix is
4732 parse_immediate (char **str
, int *val
, int min
, int max
,
4733 bfd_boolean prefix_opt
)
4736 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4737 if (exp
.X_op
!= O_constant
)
4739 inst
.error
= _("constant expression required");
4743 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4745 inst
.error
= _("immediate value out of range");
4749 *val
= exp
.X_add_number
;
4753 /* Less-generic immediate-value read function with the possibility of loading a
4754 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4755 instructions. Puts the result directly in inst.operands[i]. */
4758 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4759 bfd_boolean allow_symbol_p
)
4762 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4765 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4767 if (exp_p
->X_op
== O_constant
)
4769 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4770 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4771 O_constant. We have to be careful not to break compilation for
4772 32-bit X_add_number, though. */
4773 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4775 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4776 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4778 inst
.operands
[i
].regisimm
= 1;
4781 else if (exp_p
->X_op
== O_big
4782 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4784 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4786 /* Bignums have their least significant bits in
4787 generic_bignum[0]. Make sure we put 32 bits in imm and
4788 32 bits in reg, in a (hopefully) portable way. */
4789 gas_assert (parts
!= 0);
4791 /* Make sure that the number is not too big.
4792 PR 11972: Bignums can now be sign-extended to the
4793 size of a .octa so check that the out of range bits
4794 are all zero or all one. */
4795 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4797 LITTLENUM_TYPE m
= -1;
4799 if (generic_bignum
[parts
* 2] != 0
4800 && generic_bignum
[parts
* 2] != m
)
4803 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4804 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4808 inst
.operands
[i
].imm
= 0;
4809 for (j
= 0; j
< parts
; j
++, idx
++)
4810 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4811 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4812 inst
.operands
[i
].reg
= 0;
4813 for (j
= 0; j
< parts
; j
++, idx
++)
4814 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4815 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4816 inst
.operands
[i
].regisimm
= 1;
4818 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4826 /* Returns the pseudo-register number of an FPA immediate constant,
4827 or FAIL if there isn't a valid constant here. */
4830 parse_fpa_immediate (char ** str
)
4832 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4838 /* First try and match exact strings, this is to guarantee
4839 that some formats will work even for cross assembly. */
4841 for (i
= 0; fp_const
[i
]; i
++)
4843 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4847 *str
+= strlen (fp_const
[i
]);
4848 if (is_end_of_line
[(unsigned char) **str
])
4854 /* Just because we didn't get a match doesn't mean that the constant
4855 isn't valid, just that it is in a format that we don't
4856 automatically recognize. Try parsing it with the standard
4857 expression routines. */
4859 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4861 /* Look for a raw floating point number. */
4862 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4863 && is_end_of_line
[(unsigned char) *save_in
])
4865 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4867 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4869 if (words
[j
] != fp_values
[i
][j
])
4873 if (j
== MAX_LITTLENUMS
)
4881 /* Try and parse a more complex expression, this will probably fail
4882 unless the code uses a floating point prefix (eg "0f"). */
4883 save_in
= input_line_pointer
;
4884 input_line_pointer
= *str
;
4885 if (expression (&exp
) == absolute_section
4886 && exp
.X_op
== O_big
4887 && exp
.X_add_number
< 0)
4889 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4891 #define X_PRECISION 5
4892 #define E_PRECISION 15L
4893 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4895 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4897 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4899 if (words
[j
] != fp_values
[i
][j
])
4903 if (j
== MAX_LITTLENUMS
)
4905 *str
= input_line_pointer
;
4906 input_line_pointer
= save_in
;
4913 *str
= input_line_pointer
;
4914 input_line_pointer
= save_in
;
4915 inst
.error
= _("invalid FPA immediate expression");
4919 /* Returns 1 if a number has "quarter-precision" float format
4920 0baBbbbbbc defgh000 00000000 00000000. */
4923 is_quarter_float (unsigned imm
)
4925 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4926 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4930 /* Detect the presence of a floating point or integer zero constant,
4934 parse_ifimm_zero (char **in
)
4938 if (!is_immediate_prefix (**in
))
4943 /* Accept #0x0 as a synonym for #0. */
4944 if (strncmp (*in
, "0x", 2) == 0)
4947 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4952 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4953 &generic_floating_point_number
);
4956 && generic_floating_point_number
.sign
== '+'
4957 && (generic_floating_point_number
.low
4958 > generic_floating_point_number
.leader
))
4964 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4965 0baBbbbbbc defgh000 00000000 00000000.
4966 The zero and minus-zero cases need special handling, since they can't be
4967 encoded in the "quarter-precision" float format, but can nonetheless be
4968 loaded as integer constants. */
4971 parse_qfloat_immediate (char **ccp
, int *immed
)
4975 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4976 int found_fpchar
= 0;
4978 skip_past_char (&str
, '#');
4980 /* We must not accidentally parse an integer as a floating-point number. Make
4981 sure that the value we parse is not an integer by checking for special
4982 characters '.' or 'e'.
4983 FIXME: This is a horrible hack, but doing better is tricky because type
4984 information isn't in a very usable state at parse time. */
4986 skip_whitespace (fpnum
);
4988 if (strncmp (fpnum
, "0x", 2) == 0)
4992 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4993 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5003 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5005 unsigned fpword
= 0;
5008 /* Our FP word must be 32 bits (single-precision FP). */
5009 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5011 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5015 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5028 /* Shift operands. */
5031 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5034 struct asm_shift_name
5037 enum shift_kind kind
;
5040 /* Third argument to parse_shift. */
5041 enum parse_shift_mode
5043 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5044 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5045 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5046 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5047 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5050 /* Parse a <shift> specifier on an ARM data processing instruction.
5051 This has three forms:
5053 (LSL|LSR|ASL|ASR|ROR) Rs
5054 (LSL|LSR|ASL|ASR|ROR) #imm
5057 Note that ASL is assimilated to LSL in the instruction encoding, and
5058 RRX to ROR #0 (which cannot be written as such). */
5061 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5063 const struct asm_shift_name
*shift_name
;
5064 enum shift_kind shift
;
5069 for (p
= *str
; ISALPHA (*p
); p
++)
5074 inst
.error
= _("shift expression expected");
5078 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5081 if (shift_name
== NULL
)
5083 inst
.error
= _("shift expression expected");
5087 shift
= shift_name
->kind
;
5091 case NO_SHIFT_RESTRICT
:
5092 case SHIFT_IMMEDIATE
: break;
5094 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5095 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5097 inst
.error
= _("'LSL' or 'ASR' required");
5102 case SHIFT_LSL_IMMEDIATE
:
5103 if (shift
!= SHIFT_LSL
)
5105 inst
.error
= _("'LSL' required");
5110 case SHIFT_ASR_IMMEDIATE
:
5111 if (shift
!= SHIFT_ASR
)
5113 inst
.error
= _("'ASR' required");
5121 if (shift
!= SHIFT_RRX
)
5123 /* Whitespace can appear here if the next thing is a bare digit. */
5124 skip_whitespace (p
);
5126 if (mode
== NO_SHIFT_RESTRICT
5127 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5129 inst
.operands
[i
].imm
= reg
;
5130 inst
.operands
[i
].immisreg
= 1;
5132 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5135 inst
.operands
[i
].shift_kind
= shift
;
5136 inst
.operands
[i
].shifted
= 1;
5141 /* Parse a <shifter_operand> for an ARM data processing instruction:
5144 #<immediate>, <rotate>
5148 where <shift> is defined by parse_shift above, and <rotate> is a
5149 multiple of 2 between 0 and 30. Validation of immediate operands
5150 is deferred to md_apply_fix. */
5153 parse_shifter_operand (char **str
, int i
)
5158 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5160 inst
.operands
[i
].reg
= value
;
5161 inst
.operands
[i
].isreg
= 1;
5163 /* parse_shift will override this if appropriate */
5164 inst
.reloc
.exp
.X_op
= O_constant
;
5165 inst
.reloc
.exp
.X_add_number
= 0;
5167 if (skip_past_comma (str
) == FAIL
)
5170 /* Shift operation on register. */
5171 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5174 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5177 if (skip_past_comma (str
) == SUCCESS
)
5179 /* #x, y -- ie explicit rotation by Y. */
5180 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5183 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5185 inst
.error
= _("constant expression expected");
5189 value
= exp
.X_add_number
;
5190 if (value
< 0 || value
> 30 || value
% 2 != 0)
5192 inst
.error
= _("invalid rotation");
5195 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5197 inst
.error
= _("invalid constant");
5201 /* Encode as specified. */
5202 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5206 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5207 inst
.reloc
.pc_rel
= 0;
5211 /* Group relocation information. Each entry in the table contains the
5212 textual name of the relocation as may appear in assembler source
5213 and must end with a colon.
5214 Along with this textual name are the relocation codes to be used if
5215 the corresponding instruction is an ALU instruction (ADD or SUB only),
5216 an LDR, an LDRS, or an LDC. */
5218 struct group_reloc_table_entry
5229 /* Varieties of non-ALU group relocation. */
5236 static struct group_reloc_table_entry group_reloc_table
[] =
5237 { /* Program counter relative: */
5239 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5244 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5245 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5246 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5247 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5249 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5254 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5255 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5256 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5257 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5259 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5260 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5261 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5262 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5263 /* Section base relative */
5265 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5270 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5271 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5272 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5273 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5275 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5280 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5281 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5282 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5283 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5285 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5286 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5287 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5288 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5289 /* Absolute thumb alu relocations. */
5291 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5296 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5301 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5306 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5311 /* Given the address of a pointer pointing to the textual name of a group
5312 relocation as may appear in assembler source, attempt to find its details
5313 in group_reloc_table. The pointer will be updated to the character after
5314 the trailing colon. On failure, FAIL will be returned; SUCCESS
5315 otherwise. On success, *entry will be updated to point at the relevant
5316 group_reloc_table entry. */
5319 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5322 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5324 int length
= strlen (group_reloc_table
[i
].name
);
5326 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5327 && (*str
)[length
] == ':')
5329 *out
= &group_reloc_table
[i
];
5330 *str
+= (length
+ 1);
5338 /* Parse a <shifter_operand> for an ARM data processing instruction
5339 (as for parse_shifter_operand) where group relocations are allowed:
5342 #<immediate>, <rotate>
5343 #:<group_reloc>:<expression>
5347 where <group_reloc> is one of the strings defined in group_reloc_table.
5348 The hashes are optional.
5350 Everything else is as for parse_shifter_operand. */
5352 static parse_operand_result
5353 parse_shifter_operand_group_reloc (char **str
, int i
)
5355 /* Determine if we have the sequence of characters #: or just :
5356 coming next. If we do, then we check for a group relocation.
5357 If we don't, punt the whole lot to parse_shifter_operand. */
5359 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5360 || (*str
)[0] == ':')
5362 struct group_reloc_table_entry
*entry
;
5364 if ((*str
)[0] == '#')
5369 /* Try to parse a group relocation. Anything else is an error. */
5370 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5372 inst
.error
= _("unknown group relocation");
5373 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5376 /* We now have the group relocation table entry corresponding to
5377 the name in the assembler source. Next, we parse the expression. */
5378 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5379 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5381 /* Record the relocation type (always the ALU variant here). */
5382 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5383 gas_assert (inst
.reloc
.type
!= 0);
5385 return PARSE_OPERAND_SUCCESS
;
5388 return parse_shifter_operand (str
, i
) == SUCCESS
5389 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5391 /* Never reached. */
5394 /* Parse a Neon alignment expression. Information is written to
5395 inst.operands[i]. We assume the initial ':' has been skipped.
5397 align .imm = align << 8, .immisalign=1, .preind=0 */
5398 static parse_operand_result
5399 parse_neon_alignment (char **str
, int i
)
5404 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5406 if (exp
.X_op
!= O_constant
)
5408 inst
.error
= _("alignment must be constant");
5409 return PARSE_OPERAND_FAIL
;
5412 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5413 inst
.operands
[i
].immisalign
= 1;
5414 /* Alignments are not pre-indexes. */
5415 inst
.operands
[i
].preind
= 0;
5418 return PARSE_OPERAND_SUCCESS
;
5421 /* Parse all forms of an ARM address expression. Information is written
5422 to inst.operands[i] and/or inst.reloc.
5424 Preindexed addressing (.preind=1):
5426 [Rn, #offset] .reg=Rn .reloc.exp=offset
5427 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5429 .shift_kind=shift .reloc.exp=shift_imm
5431 These three may have a trailing ! which causes .writeback to be set also.
5433 Postindexed addressing (.postind=1, .writeback=1):
5435 [Rn], #offset .reg=Rn .reloc.exp=offset
5436 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5438 .shift_kind=shift .reloc.exp=shift_imm
5440 Unindexed addressing (.preind=0, .postind=0):
5442 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5446 [Rn]{!} shorthand for [Rn,#0]{!}
5447 =immediate .isreg=0 .reloc.exp=immediate
5448 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5450 It is the caller's responsibility to check for addressing modes not
5451 supported by the instruction, and to set inst.reloc.type. */
5453 static parse_operand_result
5454 parse_address_main (char **str
, int i
, int group_relocations
,
5455 group_reloc_type group_type
)
5460 if (skip_past_char (&p
, '[') == FAIL
)
5462 if (skip_past_char (&p
, '=') == FAIL
)
5464 /* Bare address - translate to PC-relative offset. */
5465 inst
.reloc
.pc_rel
= 1;
5466 inst
.operands
[i
].reg
= REG_PC
;
5467 inst
.operands
[i
].isreg
= 1;
5468 inst
.operands
[i
].preind
= 1;
5470 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5471 return PARSE_OPERAND_FAIL
;
5473 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5474 /*allow_symbol_p=*/TRUE
))
5475 return PARSE_OPERAND_FAIL
;
5478 return PARSE_OPERAND_SUCCESS
;
5481 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5482 skip_whitespace (p
);
5484 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5486 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5487 return PARSE_OPERAND_FAIL
;
5489 inst
.operands
[i
].reg
= reg
;
5490 inst
.operands
[i
].isreg
= 1;
5492 if (skip_past_comma (&p
) == SUCCESS
)
5494 inst
.operands
[i
].preind
= 1;
5497 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5499 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5501 inst
.operands
[i
].imm
= reg
;
5502 inst
.operands
[i
].immisreg
= 1;
5504 if (skip_past_comma (&p
) == SUCCESS
)
5505 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5506 return PARSE_OPERAND_FAIL
;
5508 else if (skip_past_char (&p
, ':') == SUCCESS
)
5510 /* FIXME: '@' should be used here, but it's filtered out by generic
5511 code before we get to see it here. This may be subject to
5513 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5515 if (result
!= PARSE_OPERAND_SUCCESS
)
5520 if (inst
.operands
[i
].negative
)
5522 inst
.operands
[i
].negative
= 0;
5526 if (group_relocations
5527 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5529 struct group_reloc_table_entry
*entry
;
5531 /* Skip over the #: or : sequence. */
5537 /* Try to parse a group relocation. Anything else is an
5539 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5541 inst
.error
= _("unknown group relocation");
5542 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5545 /* We now have the group relocation table entry corresponding to
5546 the name in the assembler source. Next, we parse the
5548 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5549 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5551 /* Record the relocation type. */
5555 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5559 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5563 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5570 if (inst
.reloc
.type
== 0)
5572 inst
.error
= _("this group relocation is not allowed on this instruction");
5573 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5579 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5580 return PARSE_OPERAND_FAIL
;
5581 /* If the offset is 0, find out if it's a +0 or -0. */
5582 if (inst
.reloc
.exp
.X_op
== O_constant
5583 && inst
.reloc
.exp
.X_add_number
== 0)
5585 skip_whitespace (q
);
5589 skip_whitespace (q
);
5592 inst
.operands
[i
].negative
= 1;
5597 else if (skip_past_char (&p
, ':') == SUCCESS
)
5599 /* FIXME: '@' should be used here, but it's filtered out by generic code
5600 before we get to see it here. This may be subject to change. */
5601 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5603 if (result
!= PARSE_OPERAND_SUCCESS
)
5607 if (skip_past_char (&p
, ']') == FAIL
)
5609 inst
.error
= _("']' expected");
5610 return PARSE_OPERAND_FAIL
;
5613 if (skip_past_char (&p
, '!') == SUCCESS
)
5614 inst
.operands
[i
].writeback
= 1;
5616 else if (skip_past_comma (&p
) == SUCCESS
)
5618 if (skip_past_char (&p
, '{') == SUCCESS
)
5620 /* [Rn], {expr} - unindexed, with option */
5621 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5622 0, 255, TRUE
) == FAIL
)
5623 return PARSE_OPERAND_FAIL
;
5625 if (skip_past_char (&p
, '}') == FAIL
)
5627 inst
.error
= _("'}' expected at end of 'option' field");
5628 return PARSE_OPERAND_FAIL
;
5630 if (inst
.operands
[i
].preind
)
5632 inst
.error
= _("cannot combine index with option");
5633 return PARSE_OPERAND_FAIL
;
5636 return PARSE_OPERAND_SUCCESS
;
5640 inst
.operands
[i
].postind
= 1;
5641 inst
.operands
[i
].writeback
= 1;
5643 if (inst
.operands
[i
].preind
)
5645 inst
.error
= _("cannot combine pre- and post-indexing");
5646 return PARSE_OPERAND_FAIL
;
5650 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5652 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5654 /* We might be using the immediate for alignment already. If we
5655 are, OR the register number into the low-order bits. */
5656 if (inst
.operands
[i
].immisalign
)
5657 inst
.operands
[i
].imm
|= reg
;
5659 inst
.operands
[i
].imm
= reg
;
5660 inst
.operands
[i
].immisreg
= 1;
5662 if (skip_past_comma (&p
) == SUCCESS
)
5663 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5664 return PARSE_OPERAND_FAIL
;
5669 if (inst
.operands
[i
].negative
)
5671 inst
.operands
[i
].negative
= 0;
5674 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5675 return PARSE_OPERAND_FAIL
;
5676 /* If the offset is 0, find out if it's a +0 or -0. */
5677 if (inst
.reloc
.exp
.X_op
== O_constant
5678 && inst
.reloc
.exp
.X_add_number
== 0)
5680 skip_whitespace (q
);
5684 skip_whitespace (q
);
5687 inst
.operands
[i
].negative
= 1;
5693 /* If at this point neither .preind nor .postind is set, we have a
5694 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5695 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5697 inst
.operands
[i
].preind
= 1;
5698 inst
.reloc
.exp
.X_op
= O_constant
;
5699 inst
.reloc
.exp
.X_add_number
= 0;
5702 return PARSE_OPERAND_SUCCESS
;
5706 parse_address (char **str
, int i
)
5708 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5712 static parse_operand_result
5713 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5715 return parse_address_main (str
, i
, 1, type
);
5718 /* Parse an operand for a MOVW or MOVT instruction. */
5720 parse_half (char **str
)
5725 skip_past_char (&p
, '#');
5726 if (strncasecmp (p
, ":lower16:", 9) == 0)
5727 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5728 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5729 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5731 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5734 skip_whitespace (p
);
5737 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5740 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5742 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5744 inst
.error
= _("constant expression expected");
5747 if (inst
.reloc
.exp
.X_add_number
< 0
5748 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5750 inst
.error
= _("immediate value out of range");
5758 /* Miscellaneous. */
5760 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5761 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5763 parse_psr (char **str
, bfd_boolean lhs
)
5766 unsigned long psr_field
;
5767 const struct asm_psr
*psr
;
5769 bfd_boolean is_apsr
= FALSE
;
5770 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5772 /* PR gas/12698: If the user has specified -march=all then m_profile will
5773 be TRUE, but we want to ignore it in this case as we are building for any
5774 CPU type, including non-m variants. */
5775 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5778 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5779 feature for ease of use and backwards compatibility. */
5781 if (strncasecmp (p
, "SPSR", 4) == 0)
5784 goto unsupported_psr
;
5786 psr_field
= SPSR_BIT
;
5788 else if (strncasecmp (p
, "CPSR", 4) == 0)
5791 goto unsupported_psr
;
5795 else if (strncasecmp (p
, "APSR", 4) == 0)
5797 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5798 and ARMv7-R architecture CPUs. */
5807 while (ISALNUM (*p
) || *p
== '_');
5809 if (strncasecmp (start
, "iapsr", 5) == 0
5810 || strncasecmp (start
, "eapsr", 5) == 0
5811 || strncasecmp (start
, "xpsr", 4) == 0
5812 || strncasecmp (start
, "psr", 3) == 0)
5813 p
= start
+ strcspn (start
, "rR") + 1;
5815 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5821 /* If APSR is being written, a bitfield may be specified. Note that
5822 APSR itself is handled above. */
5823 if (psr
->field
<= 3)
5825 psr_field
= psr
->field
;
5831 /* M-profile MSR instructions have the mask field set to "10", except
5832 *PSR variants which modify APSR, which may use a different mask (and
5833 have been handled already). Do that by setting the PSR_f field
5835 return psr
->field
| (lhs
? PSR_f
: 0);
5838 goto unsupported_psr
;
5844 /* A suffix follows. */
5850 while (ISALNUM (*p
) || *p
== '_');
5854 /* APSR uses a notation for bits, rather than fields. */
5855 unsigned int nzcvq_bits
= 0;
5856 unsigned int g_bit
= 0;
5859 for (bit
= start
; bit
!= p
; bit
++)
5861 switch (TOLOWER (*bit
))
5864 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5868 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5872 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5876 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5880 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5884 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5888 inst
.error
= _("unexpected bit specified after APSR");
5893 if (nzcvq_bits
== 0x1f)
5898 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5900 inst
.error
= _("selected processor does not "
5901 "support DSP extension");
5908 if ((nzcvq_bits
& 0x20) != 0
5909 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5910 || (g_bit
& 0x2) != 0)
5912 inst
.error
= _("bad bitmask specified after APSR");
5918 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5923 psr_field
|= psr
->field
;
5929 goto error
; /* Garbage after "[CS]PSR". */
5931 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5932 is deprecated, but allow it anyway. */
5936 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5939 else if (!m_profile
)
5940 /* These bits are never right for M-profile devices: don't set them
5941 (only code paths which read/write APSR reach here). */
5942 psr_field
|= (PSR_c
| PSR_f
);
5948 inst
.error
= _("selected processor does not support requested special "
5949 "purpose register");
5953 inst
.error
= _("flag for {c}psr instruction expected");
5957 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5958 value suitable for splatting into the AIF field of the instruction. */
5961 parse_cps_flags (char **str
)
5970 case '\0': case ',':
5973 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5974 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5975 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5978 inst
.error
= _("unrecognized CPS flag");
5983 if (saw_a_flag
== 0)
5985 inst
.error
= _("missing CPS flags");
5993 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5994 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5997 parse_endian_specifier (char **str
)
6002 if (strncasecmp (s
, "BE", 2))
6004 else if (strncasecmp (s
, "LE", 2))
6008 inst
.error
= _("valid endian specifiers are be or le");
6012 if (ISALNUM (s
[2]) || s
[2] == '_')
6014 inst
.error
= _("valid endian specifiers are be or le");
6019 return little_endian
;
6022 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6023 value suitable for poking into the rotate field of an sxt or sxta
6024 instruction, or FAIL on error. */
6027 parse_ror (char **str
)
6032 if (strncasecmp (s
, "ROR", 3) == 0)
6036 inst
.error
= _("missing rotation field after comma");
6040 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6045 case 0: *str
= s
; return 0x0;
6046 case 8: *str
= s
; return 0x1;
6047 case 16: *str
= s
; return 0x2;
6048 case 24: *str
= s
; return 0x3;
6051 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6056 /* Parse a conditional code (from conds[] below). The value returned is in the
6057 range 0 .. 14, or FAIL. */
6059 parse_cond (char **str
)
6062 const struct asm_cond
*c
;
6064 /* Condition codes are always 2 characters, so matching up to
6065 3 characters is sufficient. */
6070 while (ISALPHA (*q
) && n
< 3)
6072 cond
[n
] = TOLOWER (*q
);
6077 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6080 inst
.error
= _("condition required");
6088 /* If the given feature available in the selected CPU, mark it as used.
6089 Returns TRUE iff feature is available. */
6091 mark_feature_used (const arm_feature_set
*feature
)
6093 /* Ensure the option is valid on the current architecture. */
6094 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6097 /* Add the appropriate architecture feature for the barrier option used.
6100 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6102 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6107 /* Parse an option for a barrier instruction. Returns the encoding for the
6110 parse_barrier (char **str
)
6113 const struct asm_barrier_opt
*o
;
6116 while (ISALPHA (*q
))
6119 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6124 if (!mark_feature_used (&o
->arch
))
6131 /* Parse the operands of a table branch instruction. Similar to a memory
6134 parse_tb (char **str
)
6139 if (skip_past_char (&p
, '[') == FAIL
)
6141 inst
.error
= _("'[' expected");
6145 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6147 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6150 inst
.operands
[0].reg
= reg
;
6152 if (skip_past_comma (&p
) == FAIL
)
6154 inst
.error
= _("',' expected");
6158 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6160 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6163 inst
.operands
[0].imm
= reg
;
6165 if (skip_past_comma (&p
) == SUCCESS
)
6167 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6169 if (inst
.reloc
.exp
.X_add_number
!= 1)
6171 inst
.error
= _("invalid shift");
6174 inst
.operands
[0].shifted
= 1;
6177 if (skip_past_char (&p
, ']') == FAIL
)
6179 inst
.error
= _("']' expected");
6186 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6187 information on the types the operands can take and how they are encoded.
6188 Up to four operands may be read; this function handles setting the
6189 ".present" field for each read operand itself.
6190 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6191 else returns FAIL. */
6194 parse_neon_mov (char **str
, int *which_operand
)
6196 int i
= *which_operand
, val
;
6197 enum arm_reg_type rtype
;
6199 struct neon_type_el optype
;
6201 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6203 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6204 inst
.operands
[i
].reg
= val
;
6205 inst
.operands
[i
].isscalar
= 1;
6206 inst
.operands
[i
].vectype
= optype
;
6207 inst
.operands
[i
++].present
= 1;
6209 if (skip_past_comma (&ptr
) == FAIL
)
6212 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6215 inst
.operands
[i
].reg
= val
;
6216 inst
.operands
[i
].isreg
= 1;
6217 inst
.operands
[i
].present
= 1;
6219 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6222 /* Cases 0, 1, 2, 3, 5 (D only). */
6223 if (skip_past_comma (&ptr
) == FAIL
)
6226 inst
.operands
[i
].reg
= val
;
6227 inst
.operands
[i
].isreg
= 1;
6228 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6229 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6230 inst
.operands
[i
].isvec
= 1;
6231 inst
.operands
[i
].vectype
= optype
;
6232 inst
.operands
[i
++].present
= 1;
6234 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6236 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6237 Case 13: VMOV <Sd>, <Rm> */
6238 inst
.operands
[i
].reg
= val
;
6239 inst
.operands
[i
].isreg
= 1;
6240 inst
.operands
[i
].present
= 1;
6242 if (rtype
== REG_TYPE_NQ
)
6244 first_error (_("can't use Neon quad register here"));
6247 else if (rtype
!= REG_TYPE_VFS
)
6250 if (skip_past_comma (&ptr
) == FAIL
)
6252 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6254 inst
.operands
[i
].reg
= val
;
6255 inst
.operands
[i
].isreg
= 1;
6256 inst
.operands
[i
].present
= 1;
6259 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6262 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6263 Case 1: VMOV<c><q> <Dd>, <Dm>
6264 Case 8: VMOV.F32 <Sd>, <Sm>
6265 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6267 inst
.operands
[i
].reg
= val
;
6268 inst
.operands
[i
].isreg
= 1;
6269 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6270 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6271 inst
.operands
[i
].isvec
= 1;
6272 inst
.operands
[i
].vectype
= optype
;
6273 inst
.operands
[i
].present
= 1;
6275 if (skip_past_comma (&ptr
) == SUCCESS
)
6280 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6283 inst
.operands
[i
].reg
= val
;
6284 inst
.operands
[i
].isreg
= 1;
6285 inst
.operands
[i
++].present
= 1;
6287 if (skip_past_comma (&ptr
) == FAIL
)
6290 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6293 inst
.operands
[i
].reg
= val
;
6294 inst
.operands
[i
].isreg
= 1;
6295 inst
.operands
[i
].present
= 1;
6298 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6299 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6300 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6301 Case 10: VMOV.F32 <Sd>, #<imm>
6302 Case 11: VMOV.F64 <Dd>, #<imm> */
6303 inst
.operands
[i
].immisfloat
= 1;
6304 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6306 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6307 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6311 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6315 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6318 inst
.operands
[i
].reg
= val
;
6319 inst
.operands
[i
].isreg
= 1;
6320 inst
.operands
[i
++].present
= 1;
6322 if (skip_past_comma (&ptr
) == FAIL
)
6325 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6327 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6328 inst
.operands
[i
].reg
= val
;
6329 inst
.operands
[i
].isscalar
= 1;
6330 inst
.operands
[i
].present
= 1;
6331 inst
.operands
[i
].vectype
= optype
;
6333 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6335 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6336 inst
.operands
[i
].reg
= val
;
6337 inst
.operands
[i
].isreg
= 1;
6338 inst
.operands
[i
++].present
= 1;
6340 if (skip_past_comma (&ptr
) == FAIL
)
6343 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6346 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6350 inst
.operands
[i
].reg
= val
;
6351 inst
.operands
[i
].isreg
= 1;
6352 inst
.operands
[i
].isvec
= 1;
6353 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6354 inst
.operands
[i
].vectype
= optype
;
6355 inst
.operands
[i
].present
= 1;
6357 if (rtype
== REG_TYPE_VFS
)
6361 if (skip_past_comma (&ptr
) == FAIL
)
6363 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6366 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6369 inst
.operands
[i
].reg
= val
;
6370 inst
.operands
[i
].isreg
= 1;
6371 inst
.operands
[i
].isvec
= 1;
6372 inst
.operands
[i
].issingle
= 1;
6373 inst
.operands
[i
].vectype
= optype
;
6374 inst
.operands
[i
].present
= 1;
6377 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6381 inst
.operands
[i
].reg
= val
;
6382 inst
.operands
[i
].isreg
= 1;
6383 inst
.operands
[i
].isvec
= 1;
6384 inst
.operands
[i
].issingle
= 1;
6385 inst
.operands
[i
].vectype
= optype
;
6386 inst
.operands
[i
].present
= 1;
6391 first_error (_("parse error"));
6395 /* Successfully parsed the operands. Update args. */
6401 first_error (_("expected comma"));
6405 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6409 /* Use this macro when the operand constraints are different
6410 for ARM and THUMB (e.g. ldrd). */
6411 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6412 ((arm_operand) | ((thumb_operand) << 16))
6414 /* Matcher codes for parse_operands. */
6415 enum operand_parse_code
6417 OP_stop
, /* end of line */
6419 OP_RR
, /* ARM register */
6420 OP_RRnpc
, /* ARM register, not r15 */
6421 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6422 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6423 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6424 optional trailing ! */
6425 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6426 OP_RCP
, /* Coprocessor number */
6427 OP_RCN
, /* Coprocessor register */
6428 OP_RF
, /* FPA register */
6429 OP_RVS
, /* VFP single precision register */
6430 OP_RVD
, /* VFP double precision register (0..15) */
6431 OP_RND
, /* Neon double precision register (0..31) */
6432 OP_RNQ
, /* Neon quad precision register */
6433 OP_RVSD
, /* VFP single or double precision register */
6434 OP_RNDQ
, /* Neon double or quad precision register */
6435 OP_RNSDQ
, /* Neon single, double or quad precision register */
6436 OP_RNSC
, /* Neon scalar D[X] */
6437 OP_RVC
, /* VFP control register */
6438 OP_RMF
, /* Maverick F register */
6439 OP_RMD
, /* Maverick D register */
6440 OP_RMFX
, /* Maverick FX register */
6441 OP_RMDX
, /* Maverick DX register */
6442 OP_RMAX
, /* Maverick AX register */
6443 OP_RMDS
, /* Maverick DSPSC register */
6444 OP_RIWR
, /* iWMMXt wR register */
6445 OP_RIWC
, /* iWMMXt wC register */
6446 OP_RIWG
, /* iWMMXt wCG register */
6447 OP_RXA
, /* XScale accumulator register */
6449 OP_REGLST
, /* ARM register list */
6450 OP_VRSLST
, /* VFP single-precision register list */
6451 OP_VRDLST
, /* VFP double-precision register list */
6452 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6453 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6454 OP_NSTRLST
, /* Neon element/structure list */
6456 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6457 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6458 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6459 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6460 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6461 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6462 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6463 OP_VMOV
, /* Neon VMOV operands. */
6464 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6465 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6466 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6468 OP_I0
, /* immediate zero */
6469 OP_I7
, /* immediate value 0 .. 7 */
6470 OP_I15
, /* 0 .. 15 */
6471 OP_I16
, /* 1 .. 16 */
6472 OP_I16z
, /* 0 .. 16 */
6473 OP_I31
, /* 0 .. 31 */
6474 OP_I31w
, /* 0 .. 31, optional trailing ! */
6475 OP_I32
, /* 1 .. 32 */
6476 OP_I32z
, /* 0 .. 32 */
6477 OP_I63
, /* 0 .. 63 */
6478 OP_I63s
, /* -64 .. 63 */
6479 OP_I64
, /* 1 .. 64 */
6480 OP_I64z
, /* 0 .. 64 */
6481 OP_I255
, /* 0 .. 255 */
6483 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6484 OP_I7b
, /* 0 .. 7 */
6485 OP_I15b
, /* 0 .. 15 */
6486 OP_I31b
, /* 0 .. 31 */
6488 OP_SH
, /* shifter operand */
6489 OP_SHG
, /* shifter operand with possible group relocation */
6490 OP_ADDR
, /* Memory address expression (any mode) */
6491 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6492 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6493 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6494 OP_EXP
, /* arbitrary expression */
6495 OP_EXPi
, /* same, with optional immediate prefix */
6496 OP_EXPr
, /* same, with optional relocation suffix */
6497 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6499 OP_CPSF
, /* CPS flags */
6500 OP_ENDI
, /* Endianness specifier */
6501 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6502 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6503 OP_COND
, /* conditional code */
6504 OP_TB
, /* Table branch. */
6506 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6508 OP_RRnpc_I0
, /* ARM register or literal 0 */
6509 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6510 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6511 OP_RF_IF
, /* FPA register or immediate */
6512 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6513 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6515 /* Optional operands. */
6516 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6517 OP_oI31b
, /* 0 .. 31 */
6518 OP_oI32b
, /* 1 .. 32 */
6519 OP_oI32z
, /* 0 .. 32 */
6520 OP_oIffffb
, /* 0 .. 65535 */
6521 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6523 OP_oRR
, /* ARM register */
6524 OP_oRRnpc
, /* ARM register, not the PC */
6525 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6526 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6527 OP_oRND
, /* Optional Neon double precision register */
6528 OP_oRNQ
, /* Optional Neon quad precision register */
6529 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6530 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6531 OP_oSHll
, /* LSL immediate */
6532 OP_oSHar
, /* ASR immediate */
6533 OP_oSHllar
, /* LSL or ASR immediate */
6534 OP_oROR
, /* ROR 0/8/16/24 */
6535 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6537 /* Some pre-defined mixed (ARM/THUMB) operands. */
6538 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6539 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6540 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6542 OP_FIRST_OPTIONAL
= OP_oI7b
6545 /* Generic instruction operand parser. This does no encoding and no
6546 semantic validation; it merely squirrels values away in the inst
6547 structure. Returns SUCCESS or FAIL depending on whether the
6548 specified grammar matched. */
6550 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6552 unsigned const int *upat
= pattern
;
6553 char *backtrack_pos
= 0;
6554 const char *backtrack_error
= 0;
6555 int i
, val
= 0, backtrack_index
= 0;
6556 enum arm_reg_type rtype
;
6557 parse_operand_result result
;
6558 unsigned int op_parse_code
;
6560 #define po_char_or_fail(chr) \
6563 if (skip_past_char (&str, chr) == FAIL) \
6568 #define po_reg_or_fail(regtype) \
6571 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6572 & inst.operands[i].vectype); \
6575 first_error (_(reg_expected_msgs[regtype])); \
6578 inst.operands[i].reg = val; \
6579 inst.operands[i].isreg = 1; \
6580 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6581 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6582 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6583 || rtype == REG_TYPE_VFD \
6584 || rtype == REG_TYPE_NQ); \
6588 #define po_reg_or_goto(regtype, label) \
6591 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6592 & inst.operands[i].vectype); \
6596 inst.operands[i].reg = val; \
6597 inst.operands[i].isreg = 1; \
6598 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6599 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6600 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6601 || rtype == REG_TYPE_VFD \
6602 || rtype == REG_TYPE_NQ); \
6606 #define po_imm_or_fail(min, max, popt) \
6609 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6611 inst.operands[i].imm = val; \
6615 #define po_scalar_or_goto(elsz, label) \
6618 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6621 inst.operands[i].reg = val; \
6622 inst.operands[i].isscalar = 1; \
6626 #define po_misc_or_fail(expr) \
6634 #define po_misc_or_fail_no_backtrack(expr) \
6638 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6639 backtrack_pos = 0; \
6640 if (result != PARSE_OPERAND_SUCCESS) \
6645 #define po_barrier_or_imm(str) \
6648 val = parse_barrier (&str); \
6649 if (val == FAIL && ! ISALPHA (*str)) \
6652 /* ISB can only take SY as an option. */ \
6653 || ((inst.instruction & 0xf0) == 0x60 \
6656 inst.error = _("invalid barrier type"); \
6657 backtrack_pos = 0; \
6663 skip_whitespace (str
);
6665 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6667 op_parse_code
= upat
[i
];
6668 if (op_parse_code
>= 1<<16)
6669 op_parse_code
= thumb
? (op_parse_code
>> 16)
6670 : (op_parse_code
& ((1<<16)-1));
6672 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6674 /* Remember where we are in case we need to backtrack. */
6675 gas_assert (!backtrack_pos
);
6676 backtrack_pos
= str
;
6677 backtrack_error
= inst
.error
;
6678 backtrack_index
= i
;
6681 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6682 po_char_or_fail (',');
6684 switch (op_parse_code
)
6692 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6693 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6694 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6695 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6696 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6697 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6699 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6701 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6703 /* Also accept generic coprocessor regs for unknown registers. */
6705 po_reg_or_fail (REG_TYPE_CN
);
6707 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6708 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6709 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6710 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6711 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6712 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6713 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6714 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6715 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6716 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6718 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6720 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6721 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6723 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6725 /* Neon scalar. Using an element size of 8 means that some invalid
6726 scalars are accepted here, so deal with those in later code. */
6727 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6731 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6734 po_imm_or_fail (0, 0, TRUE
);
6739 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6744 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6747 if (parse_ifimm_zero (&str
))
6748 inst
.operands
[i
].imm
= 0;
6752 = _("only floating point zero is allowed as immediate value");
6760 po_scalar_or_goto (8, try_rr
);
6763 po_reg_or_fail (REG_TYPE_RN
);
6769 po_scalar_or_goto (8, try_nsdq
);
6772 po_reg_or_fail (REG_TYPE_NSDQ
);
6778 po_scalar_or_goto (8, try_ndq
);
6781 po_reg_or_fail (REG_TYPE_NDQ
);
6787 po_scalar_or_goto (8, try_vfd
);
6790 po_reg_or_fail (REG_TYPE_VFD
);
6795 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6796 not careful then bad things might happen. */
6797 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6802 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6805 /* There's a possibility of getting a 64-bit immediate here, so
6806 we need special handling. */
6807 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6810 inst
.error
= _("immediate value is out of range");
6818 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6821 po_imm_or_fail (0, 63, TRUE
);
6826 po_char_or_fail ('[');
6827 po_reg_or_fail (REG_TYPE_RN
);
6828 po_char_or_fail (']');
6834 po_reg_or_fail (REG_TYPE_RN
);
6835 if (skip_past_char (&str
, '!') == SUCCESS
)
6836 inst
.operands
[i
].writeback
= 1;
6840 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6841 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6842 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6843 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6844 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6845 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6846 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6847 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6848 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6849 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6850 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6851 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6853 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6855 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6856 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6858 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6859 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6860 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6861 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6863 /* Immediate variants */
6865 po_char_or_fail ('{');
6866 po_imm_or_fail (0, 255, TRUE
);
6867 po_char_or_fail ('}');
6871 /* The expression parser chokes on a trailing !, so we have
6872 to find it first and zap it. */
6875 while (*s
&& *s
!= ',')
6880 inst
.operands
[i
].writeback
= 1;
6882 po_imm_or_fail (0, 31, TRUE
);
6890 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6895 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6900 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6902 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6904 val
= parse_reloc (&str
);
6907 inst
.error
= _("unrecognized relocation suffix");
6910 else if (val
!= BFD_RELOC_UNUSED
)
6912 inst
.operands
[i
].imm
= val
;
6913 inst
.operands
[i
].hasreloc
= 1;
6918 /* Operand for MOVW or MOVT. */
6920 po_misc_or_fail (parse_half (&str
));
6923 /* Register or expression. */
6924 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6925 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6927 /* Register or immediate. */
6928 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6929 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6931 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6933 if (!is_immediate_prefix (*str
))
6936 val
= parse_fpa_immediate (&str
);
6939 /* FPA immediates are encoded as registers 8-15.
6940 parse_fpa_immediate has already applied the offset. */
6941 inst
.operands
[i
].reg
= val
;
6942 inst
.operands
[i
].isreg
= 1;
6945 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6946 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6948 /* Two kinds of register. */
6951 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6953 || (rege
->type
!= REG_TYPE_MMXWR
6954 && rege
->type
!= REG_TYPE_MMXWC
6955 && rege
->type
!= REG_TYPE_MMXWCG
))
6957 inst
.error
= _("iWMMXt data or control register expected");
6960 inst
.operands
[i
].reg
= rege
->number
;
6961 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6967 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6969 || (rege
->type
!= REG_TYPE_MMXWC
6970 && rege
->type
!= REG_TYPE_MMXWCG
))
6972 inst
.error
= _("iWMMXt control register expected");
6975 inst
.operands
[i
].reg
= rege
->number
;
6976 inst
.operands
[i
].isreg
= 1;
6981 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6982 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6983 case OP_oROR
: val
= parse_ror (&str
); break;
6984 case OP_COND
: val
= parse_cond (&str
); break;
6985 case OP_oBARRIER_I15
:
6986 po_barrier_or_imm (str
); break;
6988 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6994 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
6995 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
6997 inst
.error
= _("Banked registers are not available with this "
7003 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7007 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7010 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7012 if (strncasecmp (str
, "APSR_", 5) == 0)
7019 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7020 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7021 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7022 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7023 default: found
= 16;
7027 inst
.operands
[i
].isvec
= 1;
7028 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7029 inst
.operands
[i
].reg
= REG_PC
;
7036 po_misc_or_fail (parse_tb (&str
));
7039 /* Register lists. */
7041 val
= parse_reg_list (&str
);
7044 inst
.operands
[i
].writeback
= 1;
7050 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7054 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7058 /* Allow Q registers too. */
7059 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7064 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7066 inst
.operands
[i
].issingle
= 1;
7071 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7076 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7077 &inst
.operands
[i
].vectype
);
7080 /* Addressing modes */
7082 po_misc_or_fail (parse_address (&str
, i
));
7086 po_misc_or_fail_no_backtrack (
7087 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7091 po_misc_or_fail_no_backtrack (
7092 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7096 po_misc_or_fail_no_backtrack (
7097 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7101 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7105 po_misc_or_fail_no_backtrack (
7106 parse_shifter_operand_group_reloc (&str
, i
));
7110 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7114 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7118 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7122 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7125 /* Various value-based sanity checks and shared operations. We
7126 do not signal immediate failures for the register constraints;
7127 this allows a syntax error to take precedence. */
7128 switch (op_parse_code
)
7136 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7137 inst
.error
= BAD_PC
;
7142 if (inst
.operands
[i
].isreg
)
7144 if (inst
.operands
[i
].reg
== REG_PC
)
7145 inst
.error
= BAD_PC
;
7146 else if (inst
.operands
[i
].reg
== REG_SP
)
7147 inst
.error
= BAD_SP
;
7152 if (inst
.operands
[i
].isreg
7153 && inst
.operands
[i
].reg
== REG_PC
7154 && (inst
.operands
[i
].writeback
|| thumb
))
7155 inst
.error
= BAD_PC
;
7164 case OP_oBARRIER_I15
:
7173 inst
.operands
[i
].imm
= val
;
7180 /* If we get here, this operand was successfully parsed. */
7181 inst
.operands
[i
].present
= 1;
7185 inst
.error
= BAD_ARGS
;
7190 /* The parse routine should already have set inst.error, but set a
7191 default here just in case. */
7193 inst
.error
= _("syntax error");
7197 /* Do not backtrack over a trailing optional argument that
7198 absorbed some text. We will only fail again, with the
7199 'garbage following instruction' error message, which is
7200 probably less helpful than the current one. */
7201 if (backtrack_index
== i
&& backtrack_pos
!= str
7202 && upat
[i
+1] == OP_stop
)
7205 inst
.error
= _("syntax error");
7209 /* Try again, skipping the optional argument at backtrack_pos. */
7210 str
= backtrack_pos
;
7211 inst
.error
= backtrack_error
;
7212 inst
.operands
[backtrack_index
].present
= 0;
7213 i
= backtrack_index
;
7217 /* Check that we have parsed all the arguments. */
7218 if (*str
!= '\0' && !inst
.error
)
7219 inst
.error
= _("garbage following instruction");
7221 return inst
.error
? FAIL
: SUCCESS
;
7224 #undef po_char_or_fail
7225 #undef po_reg_or_fail
7226 #undef po_reg_or_goto
7227 #undef po_imm_or_fail
7228 #undef po_scalar_or_fail
7229 #undef po_barrier_or_imm
7231 /* Shorthand macro for instruction encoding functions issuing errors. */
7232 #define constraint(expr, err) \
7243 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7244 instructions are unpredictable if these registers are used. This
7245 is the BadReg predicate in ARM's Thumb-2 documentation. */
7246 #define reject_bad_reg(reg) \
7248 if (reg == REG_SP || reg == REG_PC) \
7250 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7255 /* If REG is R13 (the stack pointer), warn that its use is
7257 #define warn_deprecated_sp(reg) \
7259 if (warn_on_deprecated && reg == REG_SP) \
7260 as_tsktsk (_("use of r13 is deprecated")); \
7263 /* Functions for operand encoding. ARM, then Thumb. */
7265 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7267 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7269 The only binary encoding difference is the Coprocessor number. Coprocessor
7270 9 is used for half-precision calculations or conversions. The format of the
7271 instruction is the same as the equivalent Coprocessor 10 instruction that
7272 exists for Single-Precision operation. */
7275 do_scalar_fp16_v82_encode (void)
7277 if (inst
.cond
!= COND_ALWAYS
)
7278 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7279 " the behaviour is UNPREDICTABLE"));
7280 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7283 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7284 mark_feature_used (&arm_ext_fp16
);
7287 /* If VAL can be encoded in the immediate field of an ARM instruction,
7288 return the encoded form. Otherwise, return FAIL. */
7291 encode_arm_immediate (unsigned int val
)
7298 for (i
= 2; i
< 32; i
+= 2)
7299 if ((a
= rotate_left (val
, i
)) <= 0xff)
7300 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7305 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7306 return the encoded form. Otherwise, return FAIL. */
7308 encode_thumb32_immediate (unsigned int val
)
7315 for (i
= 1; i
<= 24; i
++)
7318 if ((val
& ~(0xff << i
)) == 0)
7319 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7323 if (val
== ((a
<< 16) | a
))
7325 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7329 if (val
== ((a
<< 16) | a
))
7330 return 0x200 | (a
>> 8);
7334 /* Encode a VFP SP or DP register number into inst.instruction. */
7337 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7339 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7342 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7345 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7348 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7353 first_error (_("D register out of range for selected VFP version"));
7361 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7365 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7369 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7373 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7377 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7381 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7389 /* Encode a <shift> in an ARM-format instruction. The immediate,
7390 if any, is handled by md_apply_fix. */
7392 encode_arm_shift (int i
)
7394 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7395 inst
.instruction
|= SHIFT_ROR
<< 5;
7398 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7399 if (inst
.operands
[i
].immisreg
)
7401 inst
.instruction
|= SHIFT_BY_REG
;
7402 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7405 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7410 encode_arm_shifter_operand (int i
)
7412 if (inst
.operands
[i
].isreg
)
7414 inst
.instruction
|= inst
.operands
[i
].reg
;
7415 encode_arm_shift (i
);
7419 inst
.instruction
|= INST_IMMEDIATE
;
7420 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7421 inst
.instruction
|= inst
.operands
[i
].imm
;
7425 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7427 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7430 Generate an error if the operand is not a register. */
7431 constraint (!inst
.operands
[i
].isreg
,
7432 _("Instruction does not support =N addresses"));
7434 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7436 if (inst
.operands
[i
].preind
)
7440 inst
.error
= _("instruction does not accept preindexed addressing");
7443 inst
.instruction
|= PRE_INDEX
;
7444 if (inst
.operands
[i
].writeback
)
7445 inst
.instruction
|= WRITE_BACK
;
7448 else if (inst
.operands
[i
].postind
)
7450 gas_assert (inst
.operands
[i
].writeback
);
7452 inst
.instruction
|= WRITE_BACK
;
7454 else /* unindexed - only for coprocessor */
7456 inst
.error
= _("instruction does not accept unindexed addressing");
7460 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7461 && (((inst
.instruction
& 0x000f0000) >> 16)
7462 == ((inst
.instruction
& 0x0000f000) >> 12)))
7463 as_warn ((inst
.instruction
& LOAD_BIT
)
7464 ? _("destination register same as write-back base")
7465 : _("source register same as write-back base"));
7468 /* inst.operands[i] was set up by parse_address. Encode it into an
7469 ARM-format mode 2 load or store instruction. If is_t is true,
7470 reject forms that cannot be used with a T instruction (i.e. not
7473 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7475 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7477 encode_arm_addr_mode_common (i
, is_t
);
7479 if (inst
.operands
[i
].immisreg
)
7481 constraint ((inst
.operands
[i
].imm
== REG_PC
7482 || (is_pc
&& inst
.operands
[i
].writeback
)),
7484 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7485 inst
.instruction
|= inst
.operands
[i
].imm
;
7486 if (!inst
.operands
[i
].negative
)
7487 inst
.instruction
|= INDEX_UP
;
7488 if (inst
.operands
[i
].shifted
)
7490 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7491 inst
.instruction
|= SHIFT_ROR
<< 5;
7494 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7495 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7499 else /* immediate offset in inst.reloc */
7501 if (is_pc
&& !inst
.reloc
.pc_rel
)
7503 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7505 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7506 cannot use PC in addressing.
7507 PC cannot be used in writeback addressing, either. */
7508 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7511 /* Use of PC in str is deprecated for ARMv7. */
7512 if (warn_on_deprecated
7514 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7515 as_tsktsk (_("use of PC in this instruction is deprecated"));
7518 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7520 /* Prefer + for zero encoded value. */
7521 if (!inst
.operands
[i
].negative
)
7522 inst
.instruction
|= INDEX_UP
;
7523 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7528 /* inst.operands[i] was set up by parse_address. Encode it into an
7529 ARM-format mode 3 load or store instruction. Reject forms that
7530 cannot be used with such instructions. If is_t is true, reject
7531 forms that cannot be used with a T instruction (i.e. not
7534 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7536 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7538 inst
.error
= _("instruction does not accept scaled register index");
7542 encode_arm_addr_mode_common (i
, is_t
);
7544 if (inst
.operands
[i
].immisreg
)
7546 constraint ((inst
.operands
[i
].imm
== REG_PC
7547 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7549 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7551 inst
.instruction
|= inst
.operands
[i
].imm
;
7552 if (!inst
.operands
[i
].negative
)
7553 inst
.instruction
|= INDEX_UP
;
7555 else /* immediate offset in inst.reloc */
7557 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7558 && inst
.operands
[i
].writeback
),
7560 inst
.instruction
|= HWOFFSET_IMM
;
7561 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7563 /* Prefer + for zero encoded value. */
7564 if (!inst
.operands
[i
].negative
)
7565 inst
.instruction
|= INDEX_UP
;
7567 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7572 /* Write immediate bits [7:0] to the following locations:
7574 |28/24|23 19|18 16|15 4|3 0|
7575 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7577 This function is used by VMOV/VMVN/VORR/VBIC. */
7580 neon_write_immbits (unsigned immbits
)
7582 inst
.instruction
|= immbits
& 0xf;
7583 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7584 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7587 /* Invert low-order SIZE bits of XHI:XLO. */
7590 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
7592 unsigned immlo
= xlo
? *xlo
: 0;
7593 unsigned immhi
= xhi
? *xhi
: 0;
7598 immlo
= (~immlo
) & 0xff;
7602 immlo
= (~immlo
) & 0xffff;
7606 immhi
= (~immhi
) & 0xffffffff;
7610 immlo
= (~immlo
) & 0xffffffff;
/* Return TRUE if every byte of IMM is either all-zeros (0x00) or
   all-ones (0xff), i.e. IMM has the form
   0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD where each letter group is a
   repeated single bit.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    {
      unsigned byte = (imm >> (byte_idx * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
/* For an immediate of the form accepted by neon_bits_same_in_bytes,
   collapse each byte to its low bit, producing 0bABCD (bit 3 comes
   from byte 3 down to bit 0 from byte 0).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    result |= ((imm >> (byte_idx * 8)) & 0x01) << byte_idx;

  return result;
}
/* Compress a quarter-float representation (a 32-bit IEEE single whose
   value is expressible as +/-n * 2^-r with 16 <= n <= 31, 0 <= r <= 7)
   to the 8-bit form 0b...000abcdefgh: the sign bit supplies bit 7 and
   bits [25:19] supply the low seven bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
7653 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7654 the instruction. *OP is passed as the initial value of the op field, and
7655 may be set to a different value depending on the constant (i.e.
7656 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7657 MVN). If the immediate looks like a repeated pattern then also
7658 try smaller element sizes. */
7661 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7662 unsigned *immbits
, int *op
, int size
,
7663 enum neon_el_type type
)
7665 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7667 if (type
== NT_float
&& !float_p
)
7670 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7672 if (size
!= 32 || *op
== 1)
7674 *immbits
= neon_qfloat_bits (immlo
);
7680 if (neon_bits_same_in_bytes (immhi
)
7681 && neon_bits_same_in_bytes (immlo
))
7685 *immbits
= (neon_squash_bits (immhi
) << 4)
7686 | neon_squash_bits (immlo
);
7697 if (immlo
== (immlo
& 0x000000ff))
7702 else if (immlo
== (immlo
& 0x0000ff00))
7704 *immbits
= immlo
>> 8;
7707 else if (immlo
== (immlo
& 0x00ff0000))
7709 *immbits
= immlo
>> 16;
7712 else if (immlo
== (immlo
& 0xff000000))
7714 *immbits
= immlo
>> 24;
7717 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7719 *immbits
= (immlo
>> 8) & 0xff;
7722 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7724 *immbits
= (immlo
>> 16) & 0xff;
7728 if ((immlo
& 0xffff) != (immlo
>> 16))
7735 if (immlo
== (immlo
& 0x000000ff))
7740 else if (immlo
== (immlo
& 0x0000ff00))
7742 *immbits
= immlo
>> 8;
7746 if ((immlo
& 0xff) != (immlo
>> 8))
7751 if (immlo
== (immlo
& 0x000000ff))
7753 /* Don't allow MVN with 8-bit immediate. */
7763 #if defined BFD_HOST_64_BIT
7764 /* Returns TRUE if double precision value V may be cast
7765 to single precision without loss of accuracy. */
7768 is_double_a_single (bfd_int64_t v
)
7770 int exp
= (int)((v
>> 52) & 0x7FF);
7771 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7773 return (exp
== 0 || exp
== 0x7FF
7774 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7775 && (mantissa
& 0x1FFFFFFFl
) == 0;
7778 /* Returns a double precision value cast to single precision
7779 (ignoring the least significant bits in exponent and mantissa). */
7782 double_to_single (bfd_int64_t v
)
7784 int sign
= (int) ((v
>> 63) & 1l);
7785 int exp
= (int) ((v
>> 52) & 0x7FF);
7786 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7792 exp
= exp
- 1023 + 127;
7801 /* No denormalized numbers. */
7807 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7809 #endif /* BFD_HOST_64_BIT */
7818 static void do_vfp_nsyn_opcode (const char *);
7820 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7821 Determine whether it can be performed with a move instruction; if
7822 it can, convert inst.instruction to that move instruction and
7823 return TRUE; if it can't, convert inst.instruction to a literal-pool
7824 load and return FALSE. If this is not a valid thing to do in the
7825 current context, set inst.error and return TRUE.
7827 inst.operands[i] describes the destination register. */
7830 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7833 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7834 bfd_boolean arm_p
= (t
== CONST_ARM
);
7837 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7841 if ((inst
.instruction
& tbit
) == 0)
7843 inst
.error
= _("invalid pseudo operation");
7847 if (inst
.reloc
.exp
.X_op
!= O_constant
7848 && inst
.reloc
.exp
.X_op
!= O_symbol
7849 && inst
.reloc
.exp
.X_op
!= O_big
)
7851 inst
.error
= _("constant expression expected");
7855 if (inst
.reloc
.exp
.X_op
== O_constant
7856 || inst
.reloc
.exp
.X_op
== O_big
)
7858 #if defined BFD_HOST_64_BIT
7863 if (inst
.reloc
.exp
.X_op
== O_big
)
7865 LITTLENUM_TYPE w
[X_PRECISION
];
7868 if (inst
.reloc
.exp
.X_add_number
== -1)
7870 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7872 /* FIXME: Should we check words w[2..5] ? */
7877 #if defined BFD_HOST_64_BIT
7879 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7880 << LITTLENUM_NUMBER_OF_BITS
)
7881 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7882 << LITTLENUM_NUMBER_OF_BITS
)
7883 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7884 << LITTLENUM_NUMBER_OF_BITS
)
7885 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7887 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7888 | (l
[0] & LITTLENUM_MASK
);
7892 v
= inst
.reloc
.exp
.X_add_number
;
7894 if (!inst
.operands
[i
].issingle
)
7898 /* This can be encoded only for a low register. */
7899 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7901 /* This can be done with a mov(1) instruction. */
7902 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7903 inst
.instruction
|= v
;
7907 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7908 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7910 /* Check if on thumb2 it can be done with a mov.w, mvn or
7911 movw instruction. */
7912 unsigned int newimm
;
7913 bfd_boolean isNegated
;
7915 newimm
= encode_thumb32_immediate (v
);
7916 if (newimm
!= (unsigned int) FAIL
)
7920 newimm
= encode_thumb32_immediate (~v
);
7921 if (newimm
!= (unsigned int) FAIL
)
7925 /* The number can be loaded with a mov.w or mvn
7927 if (newimm
!= (unsigned int) FAIL
7928 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7930 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7931 | (inst
.operands
[i
].reg
<< 8));
7932 /* Change to MOVN. */
7933 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7934 inst
.instruction
|= (newimm
& 0x800) << 15;
7935 inst
.instruction
|= (newimm
& 0x700) << 4;
7936 inst
.instruction
|= (newimm
& 0x0ff);
7939 /* The number can be loaded with a movw instruction. */
7940 else if ((v
& ~0xFFFF) == 0
7941 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7943 int imm
= v
& 0xFFFF;
7945 inst
.instruction
= 0xf2400000; /* MOVW. */
7946 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7947 inst
.instruction
|= (imm
& 0xf000) << 4;
7948 inst
.instruction
|= (imm
& 0x0800) << 15;
7949 inst
.instruction
|= (imm
& 0x0700) << 4;
7950 inst
.instruction
|= (imm
& 0x00ff);
7957 int value
= encode_arm_immediate (v
);
7961 /* This can be done with a mov instruction. */
7962 inst
.instruction
&= LITERAL_MASK
;
7963 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7964 inst
.instruction
|= value
& 0xfff;
7968 value
= encode_arm_immediate (~ v
);
7971 /* This can be done with a mvn instruction. */
7972 inst
.instruction
&= LITERAL_MASK
;
7973 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7974 inst
.instruction
|= value
& 0xfff;
7978 else if (t
== CONST_VEC
)
7981 unsigned immbits
= 0;
7982 unsigned immlo
= inst
.operands
[1].imm
;
7983 unsigned immhi
= inst
.operands
[1].regisimm
7984 ? inst
.operands
[1].reg
7985 : inst
.reloc
.exp
.X_unsigned
7987 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7988 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7989 &op
, 64, NT_invtype
);
7993 neon_invert_size (&immlo
, &immhi
, 64);
7995 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7996 &op
, 64, NT_invtype
);
8001 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8007 /* Fill other bits in vmov encoding for both thumb and arm. */
8009 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8011 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8012 neon_write_immbits (immbits
);
8020 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8021 if (inst
.operands
[i
].issingle
8022 && is_quarter_float (inst
.operands
[1].imm
)
8023 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8025 inst
.operands
[1].imm
=
8026 neon_qfloat_bits (v
);
8027 do_vfp_nsyn_opcode ("fconsts");
8031 /* If our host does not support a 64-bit type then we cannot perform
8032 the following optimization. This mean that there will be a
8033 discrepancy between the output produced by an assembler built for
8034 a 32-bit-only host and the output produced from a 64-bit host, but
8035 this cannot be helped. */
8036 #if defined BFD_HOST_64_BIT
8037 else if (!inst
.operands
[1].issingle
8038 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8040 if (is_double_a_single (v
)
8041 && is_quarter_float (double_to_single (v
)))
8043 inst
.operands
[1].imm
=
8044 neon_qfloat_bits (double_to_single (v
));
8045 do_vfp_nsyn_opcode ("fconstd");
8053 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8054 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8057 inst
.operands
[1].reg
= REG_PC
;
8058 inst
.operands
[1].isreg
= 1;
8059 inst
.operands
[1].preind
= 1;
8060 inst
.reloc
.pc_rel
= 1;
8061 inst
.reloc
.type
= (thumb_p
8062 ? BFD_RELOC_ARM_THUMB_OFFSET
8064 ? BFD_RELOC_ARM_HWLITERAL
8065 : BFD_RELOC_ARM_LITERAL
));
8069 /* inst.operands[i] was set up by parse_address. Encode it into an
8070 ARM-format instruction. Reject all forms which cannot be encoded
8071 into a coprocessor load/store instruction. If wb_ok is false,
8072 reject use of writeback; if unind_ok is false, reject use of
8073 unindexed addressing. If reloc_override is not 0, use it instead
8074 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8075 (in which case it is preserved). */
8078 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8080 if (!inst
.operands
[i
].isreg
)
8083 if (! inst
.operands
[0].isvec
)
8085 inst
.error
= _("invalid co-processor operand");
8088 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8092 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8094 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8096 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8098 gas_assert (!inst
.operands
[i
].writeback
);
8101 inst
.error
= _("instruction does not support unindexed addressing");
8104 inst
.instruction
|= inst
.operands
[i
].imm
;
8105 inst
.instruction
|= INDEX_UP
;
8109 if (inst
.operands
[i
].preind
)
8110 inst
.instruction
|= PRE_INDEX
;
8112 if (inst
.operands
[i
].writeback
)
8114 if (inst
.operands
[i
].reg
== REG_PC
)
8116 inst
.error
= _("pc may not be used with write-back");
8121 inst
.error
= _("instruction does not support writeback");
8124 inst
.instruction
|= WRITE_BACK
;
8128 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8129 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8130 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8131 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8134 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8136 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8139 /* Prefer + for zero encoded value. */
8140 if (!inst
.operands
[i
].negative
)
8141 inst
.instruction
|= INDEX_UP
;
8146 /* Functions for instruction encoding, sorted by sub-architecture.
8147 First some generics; their names are taken from the conventional
8148 bit positions for register arguments in ARM format instructions. */
8158 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8164 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8165 inst
.instruction
|= inst
.operands
[1].reg
;
8171 inst
.instruction
|= inst
.operands
[0].reg
;
8172 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8178 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8179 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8185 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8186 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8192 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8193 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8197 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8199 if (ARM_CPU_IS_ANY (cpu_variant
))
8201 as_tsktsk ("%s", msg
);
8204 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8216 unsigned Rn
= inst
.operands
[2].reg
;
8217 /* Enforce restrictions on SWP instruction. */
8218 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8220 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8221 _("Rn must not overlap other operands"));
8223 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8225 if (!check_obsolete (&arm_ext_v8
,
8226 _("swp{b} use is obsoleted for ARMv8 and later"))
8227 && warn_on_deprecated
8228 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8229 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8232 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8233 inst
.instruction
|= inst
.operands
[1].reg
;
8234 inst
.instruction
|= Rn
<< 16;
8240 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8241 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8242 inst
.instruction
|= inst
.operands
[2].reg
;
8248 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8249 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8250 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8251 || inst
.reloc
.exp
.X_add_number
!= 0),
8253 inst
.instruction
|= inst
.operands
[0].reg
;
8254 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8255 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8261 inst
.instruction
|= inst
.operands
[0].imm
;
8267 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8268 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8271 /* ARM instructions, in alphabetical order by function name (except
8272 that wrapper functions appear immediately after the function they
8275 /* This is a pseudo-op of the form "adr rd, label" to be converted
8276 into a relative address of the form "add rd, pc, #label-.-8". */
8281 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8283 /* Frag hacking will turn this into a sub instruction if the offset turns
8284 out to be negative. */
8285 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8286 inst
.reloc
.pc_rel
= 1;
8287 inst
.reloc
.exp
.X_add_number
-= 8;
8290 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8291 into a relative address of the form:
8292 add rd, pc, #low(label-.-8)"
8293 add rd, rd, #high(label-.-8)" */
8298 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8300 /* Frag hacking will turn this into a sub instruction if the offset turns
8301 out to be negative. */
8302 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8303 inst
.reloc
.pc_rel
= 1;
8304 inst
.size
= INSN_SIZE
* 2;
8305 inst
.reloc
.exp
.X_add_number
-= 8;
8311 if (!inst
.operands
[1].present
)
8312 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8314 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8315 encode_arm_shifter_operand (2);
8321 if (inst
.operands
[0].present
)
8322 inst
.instruction
|= inst
.operands
[0].imm
;
8324 inst
.instruction
|= 0xf;
8330 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8331 constraint (msb
> 32, _("bit-field extends past end of register"));
8332 /* The instruction encoding stores the LSB and MSB,
8333 not the LSB and width. */
8334 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8335 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8336 inst
.instruction
|= (msb
- 1) << 16;
8344 /* #0 in second position is alternative syntax for bfc, which is
8345 the same instruction but with REG_PC in the Rm field. */
8346 if (!inst
.operands
[1].isreg
)
8347 inst
.operands
[1].reg
= REG_PC
;
8349 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8350 constraint (msb
> 32, _("bit-field extends past end of register"));
8351 /* The instruction encoding stores the LSB and MSB,
8352 not the LSB and width. */
8353 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8354 inst
.instruction
|= inst
.operands
[1].reg
;
8355 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8356 inst
.instruction
|= (msb
- 1) << 16;
8362 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8363 _("bit-field extends past end of register"));
8364 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8365 inst
.instruction
|= inst
.operands
[1].reg
;
8366 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8367 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8370 /* ARM V5 breakpoint instruction (argument parse)
8371 BKPT <16 bit unsigned immediate>
8372 Instruction is not conditional.
8373 The bit pattern given in insns[] has the COND_ALWAYS condition,
8374 and it is an error if the caller tried to override that. */
8379 /* Top 12 of 16 bits to bits 19:8. */
8380 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8382 /* Bottom 4 of 16 bits to bits 3:0. */
8383 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8387 encode_branch (int default_reloc
)
8389 if (inst
.operands
[0].hasreloc
)
8391 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8392 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8393 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8394 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8395 ? BFD_RELOC_ARM_PLT32
8396 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8399 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8400 inst
.reloc
.pc_rel
= 1;
8407 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8408 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8411 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8418 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8420 if (inst
.cond
== COND_ALWAYS
)
8421 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8423 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8427 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8430 /* ARM V5 branch-link-exchange instruction (argument parse)
8431 BLX <target_addr> ie BLX(1)
8432 BLX{<condition>} <Rm> ie BLX(2)
8433 Unfortunately, there are two different opcodes for this mnemonic.
8434 So, the insns[].value is not used, and the code here zaps values
8435 into inst.instruction.
8436 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8441 if (inst
.operands
[0].isreg
)
8443 /* Arg is a register; the opcode provided by insns[] is correct.
8444 It is not illegal to do "blx pc", just useless. */
8445 if (inst
.operands
[0].reg
== REG_PC
)
8446 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8448 inst
.instruction
|= inst
.operands
[0].reg
;
8452 /* Arg is an address; this instruction cannot be executed
8453 conditionally, and the opcode must be adjusted.
8454 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8455 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8456 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8457 inst
.instruction
= 0xfa000000;
8458 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8465 bfd_boolean want_reloc
;
8467 if (inst
.operands
[0].reg
== REG_PC
)
8468 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8470 inst
.instruction
|= inst
.operands
[0].reg
;
8471 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8472 it is for ARMv4t or earlier. */
8473 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8474 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8478 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8483 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8487 /* ARM v5TEJ. Jump to Jazelle code. */
8492 if (inst
.operands
[0].reg
== REG_PC
)
8493 as_tsktsk (_("use of r15 in bxj is not really useful"));
8495 inst
.instruction
|= inst
.operands
[0].reg
;
8498 /* Co-processor data operation:
8499 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8500 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8504 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8505 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8506 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8507 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8508 inst
.instruction
|= inst
.operands
[4].reg
;
8509 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8515 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8516 encode_arm_shifter_operand (1);
8519 /* Transfer between coprocessor and ARM registers.
8520 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8525 No special properties. */
8527 struct deprecated_coproc_regs_s
8534 arm_feature_set deprecated
;
8535 arm_feature_set obsoleted
;
8536 const char *dep_msg
;
8537 const char *obs_msg
;
8540 #define DEPR_ACCESS_V8 \
8541 N_("This coprocessor register access is deprecated in ARMv8")
8543 /* Table of all deprecated coprocessor registers. */
8544 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8546 {15, 0, 7, 10, 5, /* CP15DMB. */
8547 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8548 DEPR_ACCESS_V8
, NULL
},
8549 {15, 0, 7, 10, 4, /* CP15DSB. */
8550 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8551 DEPR_ACCESS_V8
, NULL
},
8552 {15, 0, 7, 5, 4, /* CP15ISB. */
8553 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8554 DEPR_ACCESS_V8
, NULL
},
8555 {14, 6, 1, 0, 0, /* TEEHBR. */
8556 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8557 DEPR_ACCESS_V8
, NULL
},
8558 {14, 6, 0, 0, 0, /* TEECR. */
8559 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8560 DEPR_ACCESS_V8
, NULL
},
8563 #undef DEPR_ACCESS_V8
8565 static const size_t deprecated_coproc_reg_count
=
8566 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8574 Rd
= inst
.operands
[2].reg
;
8577 if (inst
.instruction
== 0xee000010
8578 || inst
.instruction
== 0xfe000010)
8580 reject_bad_reg (Rd
);
8583 constraint (Rd
== REG_SP
, BAD_SP
);
8588 if (inst
.instruction
== 0xe000010)
8589 constraint (Rd
== REG_PC
, BAD_PC
);
8592 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8594 const struct deprecated_coproc_regs_s
*r
=
8595 deprecated_coproc_regs
+ i
;
8597 if (inst
.operands
[0].reg
== r
->cp
8598 && inst
.operands
[1].imm
== r
->opc1
8599 && inst
.operands
[3].reg
== r
->crn
8600 && inst
.operands
[4].reg
== r
->crm
8601 && inst
.operands
[5].imm
== r
->opc2
)
8603 if (! ARM_CPU_IS_ANY (cpu_variant
)
8604 && warn_on_deprecated
8605 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8606 as_tsktsk ("%s", r
->dep_msg
);
8610 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8611 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8612 inst
.instruction
|= Rd
<< 12;
8613 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8614 inst
.instruction
|= inst
.operands
[4].reg
;
8615 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8618 /* Transfer between coprocessor register and pair of ARM registers.
8619 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8624 Two XScale instructions are special cases of these:
8626 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8627 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8629 Result unpredictable if Rd or Rn is R15. */
8636 Rd
= inst
.operands
[2].reg
;
8637 Rn
= inst
.operands
[3].reg
;
8641 reject_bad_reg (Rd
);
8642 reject_bad_reg (Rn
);
8646 constraint (Rd
== REG_PC
, BAD_PC
);
8647 constraint (Rn
== REG_PC
, BAD_PC
);
8650 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8651 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8652 inst
.instruction
|= Rd
<< 12;
8653 inst
.instruction
|= Rn
<< 16;
8654 inst
.instruction
|= inst
.operands
[4].reg
;
8660 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8661 if (inst
.operands
[1].present
)
8663 inst
.instruction
|= CPSI_MMOD
;
8664 inst
.instruction
|= inst
.operands
[1].imm
;
8671 inst
.instruction
|= inst
.operands
[0].imm
;
8677 unsigned Rd
, Rn
, Rm
;
8679 Rd
= inst
.operands
[0].reg
;
8680 Rn
= (inst
.operands
[1].present
8681 ? inst
.operands
[1].reg
: Rd
);
8682 Rm
= inst
.operands
[2].reg
;
8684 constraint ((Rd
== REG_PC
), BAD_PC
);
8685 constraint ((Rn
== REG_PC
), BAD_PC
);
8686 constraint ((Rm
== REG_PC
), BAD_PC
);
8688 inst
.instruction
|= Rd
<< 16;
8689 inst
.instruction
|= Rn
<< 0;
8690 inst
.instruction
|= Rm
<< 8;
8696 /* There is no IT instruction in ARM mode. We
8697 process it to do the validation as if in
8698 thumb mode, just in case the code gets
8699 assembled for thumb using the unified syntax. */
8704 set_it_insn_type (IT_INSN
);
8705 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8706 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs returns 0 for an empty mask, making i == -1; bail out early so
     the (1 << i) test below never shifts by a negative count (UB).  */
  if (i < 0)
    return -1;

  /* Exactly one register iff the mask is a power of two no higher
     than r15.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8720 encode_ldmstm(int from_push_pop_mnem
)
8722 int base_reg
= inst
.operands
[0].reg
;
8723 int range
= inst
.operands
[1].imm
;
8726 inst
.instruction
|= base_reg
<< 16;
8727 inst
.instruction
|= range
;
8729 if (inst
.operands
[1].writeback
)
8730 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8732 if (inst
.operands
[0].writeback
)
8734 inst
.instruction
|= WRITE_BACK
;
8735 /* Check for unpredictable uses of writeback. */
8736 if (inst
.instruction
& LOAD_BIT
)
8738 /* Not allowed in LDM type 2. */
8739 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8740 && ((range
& (1 << REG_PC
)) == 0))
8741 as_warn (_("writeback of base register is UNPREDICTABLE"));
8742 /* Only allowed if base reg not in list for other types. */
8743 else if (range
& (1 << base_reg
))
8744 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8748 /* Not allowed for type 2. */
8749 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8750 as_warn (_("writeback of base register is UNPREDICTABLE"));
8751 /* Only allowed if base reg not in list, or first in list. */
8752 else if ((range
& (1 << base_reg
))
8753 && (range
& ((1 << base_reg
) - 1)))
8754 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8758 /* If PUSH/POP has only one register, then use the A2 encoding. */
8759 one_reg
= only_one_reg_in_list (range
);
8760 if (from_push_pop_mnem
&& one_reg
>= 0)
8762 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8764 inst
.instruction
&= A_COND_MASK
;
8765 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8766 inst
.instruction
|= one_reg
<< 12;
8773 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8776 /* ARMv5TE load-consecutive (argument parse)
8785 constraint (inst
.operands
[0].reg
% 2 != 0,
8786 _("first transfer register must be even"));
8787 constraint (inst
.operands
[1].present
8788 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8789 _("can only transfer two consecutive registers"));
8790 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8791 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8793 if (!inst
.operands
[1].present
)
8794 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8796 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8797 register and the first register written; we have to diagnose
8798 overlap between the base and the second register written here. */
8800 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8801 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8802 as_warn (_("base register written back, and overlaps "
8803 "second transfer register"));
8805 if (!(inst
.instruction
& V4_STR_BIT
))
8807 /* For an index-register load, the index register must not overlap the
8808 destination (even if not write-back). */
8809 if (inst
.operands
[2].immisreg
8810 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8811 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8812 as_warn (_("index register overlaps transfer register"));
8814 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8815 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8821 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8822 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8823 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8824 || inst
.operands
[1].negative
8825 /* This can arise if the programmer has written
8827 or if they have mistakenly used a register name as the last
8830 It is very difficult to distinguish between these two cases
8831 because "rX" might actually be a label. ie the register
8832 name has been occluded by a symbol of the same name. So we
8833 just generate a general 'bad addressing mode' type error
8834 message and leave it up to the programmer to discover the
8835 true cause and fix their mistake. */
8836 || (inst
.operands
[1].reg
== REG_PC
),
8839 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8840 || inst
.reloc
.exp
.X_add_number
!= 0,
8841 _("offset must be zero in ARM encoding"));
8843 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8845 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8846 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8847 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8853 constraint (inst
.operands
[0].reg
% 2 != 0,
8854 _("even register required"));
8855 constraint (inst
.operands
[1].present
8856 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8857 _("can only load two consecutive registers"));
8858 /* If op 1 were present and equal to PC, this function wouldn't
8859 have been called in the first place. */
8860 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8862 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8863 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8866 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8867 which is not a multiple of four is UNPREDICTABLE. */
8869 check_ldr_r15_aligned (void)
8871 constraint (!(inst
.operands
[1].immisreg
)
8872 && (inst
.operands
[0].reg
== REG_PC
8873 && inst
.operands
[1].reg
== REG_PC
8874 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8875 _("ldr to register 15 must be 4-byte alligned"));
8881 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8882 if (!inst
.operands
[1].isreg
)
8883 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8885 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8886 check_ldr_r15_aligned ();
8892 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8894 if (inst
.operands
[1].preind
)
8896 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8897 || inst
.reloc
.exp
.X_add_number
!= 0,
8898 _("this instruction requires a post-indexed address"));
8900 inst
.operands
[1].preind
= 0;
8901 inst
.operands
[1].postind
= 1;
8902 inst
.operands
[1].writeback
= 1;
8904 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8905 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8908 /* Halfword and signed-byte load/store operations. */
8913 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8914 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8915 if (!inst
.operands
[1].isreg
)
8916 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8918 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8924 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8926 if (inst
.operands
[1].preind
)
8928 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8929 || inst
.reloc
.exp
.X_add_number
!= 0,
8930 _("this instruction requires a post-indexed address"));
8932 inst
.operands
[1].preind
= 0;
8933 inst
.operands
[1].postind
= 1;
8934 inst
.operands
[1].writeback
= 1;
8936 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8937 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8940 /* Co-processor register load/store.
8941 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8945 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8946 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8947 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8953 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8954 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8955 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8956 && !(inst
.instruction
& 0x00400000))
8957 as_tsktsk (_("Rd and Rm should be different in mla"));
8959 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8960 inst
.instruction
|= inst
.operands
[1].reg
;
8961 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8962 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8968 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8969 encode_arm_shifter_operand (1);
8972 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8979 top
= (inst
.instruction
& 0x00400000) != 0;
8980 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8981 _(":lower16: not allowed this instruction"));
8982 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8983 _(":upper16: not allowed instruction"));
8984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8985 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8987 imm
= inst
.reloc
.exp
.X_add_number
;
8988 /* The value is in two pieces: 0:11, 16:19. */
8989 inst
.instruction
|= (imm
& 0x00000fff);
8990 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8995 do_vfp_nsyn_mrs (void)
8997 if (inst
.operands
[0].isvec
)
8999 if (inst
.operands
[1].reg
!= 1)
9000 first_error (_("operand 1 must be FPSCR"));
9001 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9002 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9003 do_vfp_nsyn_opcode ("fmstat");
9005 else if (inst
.operands
[1].isvec
)
9006 do_vfp_nsyn_opcode ("fmrx");
9014 do_vfp_nsyn_msr (void)
9016 if (inst
.operands
[0].isvec
)
9017 do_vfp_nsyn_opcode ("fmxr");
9027 unsigned Rt
= inst
.operands
[0].reg
;
9029 if (thumb_mode
&& Rt
== REG_SP
)
9031 inst
.error
= BAD_SP
;
9035 /* APSR_ sets isvec. All other refs to PC are illegal. */
9036 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9038 inst
.error
= BAD_PC
;
9042 /* If we get through parsing the register name, we just insert the number
9043 generated into the instruction without further validation. */
9044 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9045 inst
.instruction
|= (Rt
<< 12);
9051 unsigned Rt
= inst
.operands
[1].reg
;
9054 reject_bad_reg (Rt
);
9055 else if (Rt
== REG_PC
)
9057 inst
.error
= BAD_PC
;
9061 /* If we get through parsing the register name, we just insert the number
9062 generated into the instruction without further validation. */
9063 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9064 inst
.instruction
|= (Rt
<< 12);
9072 if (do_vfp_nsyn_mrs () == SUCCESS
)
9075 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9076 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9078 if (inst
.operands
[1].isreg
)
9080 br
= inst
.operands
[1].reg
;
9081 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9082 as_bad (_("bad register for mrs"));
9086 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9087 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9089 _("'APSR', 'CPSR' or 'SPSR' expected"));
9090 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9093 inst
.instruction
|= br
;
9096 /* Two possible forms:
9097 "{C|S}PSR_<field>, Rm",
9098 "{C|S}PSR_f, #expression". */
9103 if (do_vfp_nsyn_msr () == SUCCESS
)
9106 inst
.instruction
|= inst
.operands
[0].imm
;
9107 if (inst
.operands
[1].isreg
)
9108 inst
.instruction
|= inst
.operands
[1].reg
;
9111 inst
.instruction
|= INST_IMMEDIATE
;
9112 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9113 inst
.reloc
.pc_rel
= 0;
9120 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9122 if (!inst
.operands
[2].present
)
9123 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9124 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9125 inst
.instruction
|= inst
.operands
[1].reg
;
9126 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9128 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9129 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9130 as_tsktsk (_("Rd and Rm should be different in mul"));
9133 /* Long Multiply Parser
9134 UMULL RdLo, RdHi, Rm, Rs
9135 SMULL RdLo, RdHi, Rm, Rs
9136 UMLAL RdLo, RdHi, Rm, Rs
9137 SMLAL RdLo, RdHi, Rm, Rs. */
9142 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9143 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9144 inst
.instruction
|= inst
.operands
[2].reg
;
9145 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9147 /* rdhi and rdlo must be different. */
9148 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9149 as_tsktsk (_("rdhi and rdlo must be different"));
9151 /* rdhi, rdlo and rm must all be different before armv6. */
9152 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9153 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9154 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9155 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9161 if (inst
.operands
[0].present
9162 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9164 /* Architectural NOP hints are CPSR sets with no bits selected. */
9165 inst
.instruction
&= 0xf0000000;
9166 inst
.instruction
|= 0x0320f000;
9167 if (inst
.operands
[0].present
)
9168 inst
.instruction
|= inst
.operands
[0].imm
;
9172 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9173 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9174 Condition defaults to COND_ALWAYS.
9175 Error if Rd, Rn or Rm are R15. */
9180 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9181 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9182 inst
.instruction
|= inst
.operands
[2].reg
;
9183 if (inst
.operands
[3].present
)
9184 encode_arm_shift (3);
9187 /* ARM V6 PKHTB (Argument Parse). */
9192 if (!inst
.operands
[3].present
)
9194 /* If the shift specifier is omitted, turn the instruction
9195 into pkhbt rd, rm, rn. */
9196 inst
.instruction
&= 0xfff00010;
9197 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9198 inst
.instruction
|= inst
.operands
[1].reg
;
9199 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9203 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9204 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9205 inst
.instruction
|= inst
.operands
[2].reg
;
9206 encode_arm_shift (3);
9210 /* ARMv5TE: Preload-Cache
9211 MP Extensions: Preload for write
9215 Syntactically, like LDR with B=1, W=0, L=1. */
9220 constraint (!inst
.operands
[0].isreg
,
9221 _("'[' expected after PLD mnemonic"));
9222 constraint (inst
.operands
[0].postind
,
9223 _("post-indexed expression used in preload instruction"));
9224 constraint (inst
.operands
[0].writeback
,
9225 _("writeback used in preload instruction"));
9226 constraint (!inst
.operands
[0].preind
,
9227 _("unindexed addressing used in preload instruction"));
9228 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9231 /* ARMv7: PLI <addr_mode> */
9235 constraint (!inst
.operands
[0].isreg
,
9236 _("'[' expected after PLI mnemonic"));
9237 constraint (inst
.operands
[0].postind
,
9238 _("post-indexed expression used in preload instruction"));
9239 constraint (inst
.operands
[0].writeback
,
9240 _("writeback used in preload instruction"));
9241 constraint (!inst
.operands
[0].preind
,
9242 _("unindexed addressing used in preload instruction"));
9243 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9244 inst
.instruction
&= ~PRE_INDEX
;
9250 constraint (inst
.operands
[0].writeback
,
9251 _("push/pop do not support {reglist}^"));
9252 inst
.operands
[1] = inst
.operands
[0];
9253 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9254 inst
.operands
[0].isreg
= 1;
9255 inst
.operands
[0].writeback
= 1;
9256 inst
.operands
[0].reg
= REG_SP
;
9257 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9260 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9261 word at the specified address and the following word
9263 Unconditionally executed.
9264 Error if Rn is R15. */
9269 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9270 if (inst
.operands
[0].writeback
)
9271 inst
.instruction
|= WRITE_BACK
;
9274 /* ARM V6 ssat (argument parse). */
9279 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9280 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9281 inst
.instruction
|= inst
.operands
[2].reg
;
9283 if (inst
.operands
[3].present
)
9284 encode_arm_shift (3);
9287 /* ARM V6 usat (argument parse). */
9292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9293 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9294 inst
.instruction
|= inst
.operands
[2].reg
;
9296 if (inst
.operands
[3].present
)
9297 encode_arm_shift (3);
9300 /* ARM V6 ssat16 (argument parse). */
9305 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9306 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9307 inst
.instruction
|= inst
.operands
[2].reg
;
9313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9314 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9315 inst
.instruction
|= inst
.operands
[2].reg
;
9318 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9319 preserving the other bits.
9321 setend <endian_specifier>, where <endian_specifier> is either
9327 if (warn_on_deprecated
9328 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9329 as_tsktsk (_("setend use is deprecated for ARMv8"));
9331 if (inst
.operands
[0].imm
)
9332 inst
.instruction
|= 0x200;
9338 unsigned int Rm
= (inst
.operands
[1].present
9339 ? inst
.operands
[1].reg
9340 : inst
.operands
[0].reg
);
9342 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9343 inst
.instruction
|= Rm
;
9344 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9346 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9347 inst
.instruction
|= SHIFT_BY_REG
;
9348 /* PR 12854: Error on extraneous shifts. */
9349 constraint (inst
.operands
[2].shifted
,
9350 _("extraneous shift as part of operand to shift insn"));
9353 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9359 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9360 inst
.reloc
.pc_rel
= 0;
9366 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9367 inst
.reloc
.pc_rel
= 0;
9373 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9374 inst
.reloc
.pc_rel
= 0;
9380 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9381 _("selected processor does not support SETPAN instruction"));
9383 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9389 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9390 _("selected processor does not support SETPAN instruction"));
9392 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9395 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9396 SMLAxy{cond} Rd,Rm,Rs,Rn
9397 SMLAWy{cond} Rd,Rm,Rs,Rn
9398 Error if any register is R15. */
9403 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9404 inst
.instruction
|= inst
.operands
[1].reg
;
9405 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9406 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9409 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9410 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9411 Error if any register is R15.
9412 Warning if Rdlo == Rdhi. */
9417 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9418 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9419 inst
.instruction
|= inst
.operands
[2].reg
;
9420 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9422 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9423 as_tsktsk (_("rdhi and rdlo must be different"));
9426 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9427 SMULxy{cond} Rd,Rm,Rs
9428 Error if any register is R15. */
9433 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9434 inst
.instruction
|= inst
.operands
[1].reg
;
9435 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9438 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9439 the same for both ARM and Thumb-2. */
9446 if (inst
.operands
[0].present
)
9448 reg
= inst
.operands
[0].reg
;
9449 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9454 inst
.instruction
|= reg
<< 16;
9455 inst
.instruction
|= inst
.operands
[1].imm
;
9456 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9457 inst
.instruction
|= WRITE_BACK
;
9460 /* ARM V6 strex (argument parse). */
9465 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9466 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9467 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9468 || inst
.operands
[2].negative
9469 /* See comment in do_ldrex(). */
9470 || (inst
.operands
[2].reg
== REG_PC
),
9473 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9474 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9476 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9477 || inst
.reloc
.exp
.X_add_number
!= 0,
9478 _("offset must be zero in ARM encoding"));
9480 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9481 inst
.instruction
|= inst
.operands
[1].reg
;
9482 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9483 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9489 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9490 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9491 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9492 || inst
.operands
[2].negative
,
9495 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9496 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9504 constraint (inst
.operands
[1].reg
% 2 != 0,
9505 _("even register required"));
9506 constraint (inst
.operands
[2].present
9507 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9508 _("can only store two consecutive registers"));
9509 /* If op 2 were present and equal to PC, this function wouldn't
9510 have been called in the first place. */
9511 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9513 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9514 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9515 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9518 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9519 inst
.instruction
|= inst
.operands
[1].reg
;
9520 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9527 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9528 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9536 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9537 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9542 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9543 extends it to 32-bits, and adds the result to a value in another
9544 register. You can specify a rotation by 0, 8, 16, or 24 bits
9545 before extracting the 16-bit value.
9546 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9547 Condition defaults to COND_ALWAYS.
9548 Error if any register uses R15. */
9553 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9554 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9555 inst
.instruction
|= inst
.operands
[2].reg
;
9556 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9561 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9562 Condition defaults to COND_ALWAYS.
9563 Error if any register uses R15. */
9568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9569 inst
.instruction
|= inst
.operands
[1].reg
;
9570 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9573 /* VFP instructions. In a logical order: SP variant first, monad
9574 before dyad, arithmetic then move then load/store. */
9577 do_vfp_sp_monadic (void)
9579 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9580 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9584 do_vfp_sp_dyadic (void)
9586 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9587 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9588 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9592 do_vfp_sp_compare_z (void)
9594 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9598 do_vfp_dp_sp_cvt (void)
9600 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9601 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9605 do_vfp_sp_dp_cvt (void)
9607 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9608 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9612 do_vfp_reg_from_sp (void)
9614 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9615 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9619 do_vfp_reg2_from_sp2 (void)
9621 constraint (inst
.operands
[2].imm
!= 2,
9622 _("only two consecutive VFP SP registers allowed here"));
9623 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9624 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9625 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9629 do_vfp_sp_from_reg (void)
9631 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9632 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9636 do_vfp_sp2_from_reg2 (void)
9638 constraint (inst
.operands
[0].imm
!= 2,
9639 _("only two consecutive VFP SP registers allowed here"));
9640 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9641 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9642 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9646 do_vfp_sp_ldst (void)
9648 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9649 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9653 do_vfp_dp_ldst (void)
9655 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9656 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9661 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9663 if (inst
.operands
[0].writeback
)
9664 inst
.instruction
|= WRITE_BACK
;
9666 constraint (ldstm_type
!= VFP_LDSTMIA
,
9667 _("this addressing mode requires base-register writeback"));
9668 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9669 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9670 inst
.instruction
|= inst
.operands
[1].imm
;
9674 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9678 if (inst
.operands
[0].writeback
)
9679 inst
.instruction
|= WRITE_BACK
;
9681 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9682 _("this addressing mode requires base-register writeback"));
9684 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9685 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9687 count
= inst
.operands
[1].imm
<< 1;
9688 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9691 inst
.instruction
|= count
;
9695 do_vfp_sp_ldstmia (void)
9697 vfp_sp_ldstm (VFP_LDSTMIA
);
9701 do_vfp_sp_ldstmdb (void)
9703 vfp_sp_ldstm (VFP_LDSTMDB
);
9707 do_vfp_dp_ldstmia (void)
9709 vfp_dp_ldstm (VFP_LDSTMIA
);
9713 do_vfp_dp_ldstmdb (void)
9715 vfp_dp_ldstm (VFP_LDSTMDB
);
9719 do_vfp_xp_ldstmia (void)
9721 vfp_dp_ldstm (VFP_LDSTMIAX
);
9725 do_vfp_xp_ldstmdb (void)
9727 vfp_dp_ldstm (VFP_LDSTMDBX
);
9731 do_vfp_dp_rd_rm (void)
9733 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9734 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9738 do_vfp_dp_rn_rd (void)
9740 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9741 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9745 do_vfp_dp_rd_rn (void)
9747 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9748 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9752 do_vfp_dp_rd_rn_rm (void)
9754 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9755 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9756 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9762 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9766 do_vfp_dp_rm_rd_rn (void)
9768 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9769 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9770 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9773 /* VFPv3 instructions. */
9775 do_vfp_sp_const (void)
9777 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9778 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9779 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9783 do_vfp_dp_const (void)
9785 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9786 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9787 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9791 vfp_conv (int srcsize
)
9793 int immbits
= srcsize
- inst
.operands
[1].imm
;
9795 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9797 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9798 i.e. immbits must be in range 0 - 16. */
9799 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9802 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9804 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9805 i.e. immbits must be in range 0 - 31. */
9806 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9810 inst
.instruction
|= (immbits
& 1) << 5;
9811 inst
.instruction
|= (immbits
>> 1);
9815 do_vfp_sp_conv_16 (void)
9817 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9822 do_vfp_dp_conv_16 (void)
9824 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9829 do_vfp_sp_conv_32 (void)
9831 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9836 do_vfp_dp_conv_32 (void)
9838 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9842 /* FPA instructions. Also in a logical order. */
9847 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9848 inst
.instruction
|= inst
.operands
[1].reg
;
9852 do_fpa_ldmstm (void)
9854 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9855 switch (inst
.operands
[1].imm
)
9857 case 1: inst
.instruction
|= CP_T_X
; break;
9858 case 2: inst
.instruction
|= CP_T_Y
; break;
9859 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9864 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9866 /* The instruction specified "ea" or "fd", so we can only accept
9867 [Rn]{!}. The instruction does not really support stacking or
9868 unstacking, so we have to emulate these by setting appropriate
9869 bits and offsets. */
9870 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9871 || inst
.reloc
.exp
.X_add_number
!= 0,
9872 _("this instruction does not support indexing"));
9874 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9875 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9877 if (!(inst
.instruction
& INDEX_UP
))
9878 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9880 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9882 inst
.operands
[2].preind
= 0;
9883 inst
.operands
[2].postind
= 1;
9887 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9890 /* iWMMXt instructions: strictly in alphabetical order. */
9893 do_iwmmxt_tandorc (void)
9895 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9899 do_iwmmxt_textrc (void)
9901 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9902 inst
.instruction
|= inst
.operands
[1].imm
;
9906 do_iwmmxt_textrm (void)
9908 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9909 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9910 inst
.instruction
|= inst
.operands
[2].imm
;
9914 do_iwmmxt_tinsr (void)
9916 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9917 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9918 inst
.instruction
|= inst
.operands
[2].imm
;
9922 do_iwmmxt_tmia (void)
9924 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9925 inst
.instruction
|= inst
.operands
[1].reg
;
9926 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9930 do_iwmmxt_waligni (void)
9932 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9933 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9934 inst
.instruction
|= inst
.operands
[2].reg
;
9935 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9939 do_iwmmxt_wmerge (void)
9941 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9942 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9943 inst
.instruction
|= inst
.operands
[2].reg
;
9944 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9948 do_iwmmxt_wmov (void)
9950 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9951 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9952 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9953 inst
.instruction
|= inst
.operands
[1].reg
;
9957 do_iwmmxt_wldstbh (void)
9960 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9962 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
9964 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
9965 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
9969 do_iwmmxt_wldstw (void)
9971 /* RIWR_RIWC clears .isreg for a control register. */
9972 if (!inst
.operands
[0].isreg
)
9974 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9975 inst
.instruction
|= 0xf0000000;
9978 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9979 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
9983 do_iwmmxt_wldstd (void)
9985 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9986 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
9987 && inst
.operands
[1].immisreg
)
9989 inst
.instruction
&= ~0x1a000ff;
9990 inst
.instruction
|= (0xfU
<< 28);
9991 if (inst
.operands
[1].preind
)
9992 inst
.instruction
|= PRE_INDEX
;
9993 if (!inst
.operands
[1].negative
)
9994 inst
.instruction
|= INDEX_UP
;
9995 if (inst
.operands
[1].writeback
)
9996 inst
.instruction
|= WRITE_BACK
;
9997 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9998 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
9999 inst
.instruction
|= inst
.operands
[1].imm
;
10002 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10006 do_iwmmxt_wshufh (void)
10008 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10009 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10010 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10011 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10015 do_iwmmxt_wzero (void)
10017 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10018 inst
.instruction
|= inst
.operands
[0].reg
;
10019 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10020 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10024 do_iwmmxt_wrwrwr_or_imm5 (void)
10026 if (inst
.operands
[2].isreg
)
10029 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10030 _("immediate operand requires iWMMXt2"));
10032 if (inst
.operands
[2].imm
== 0)
10034 switch ((inst
.instruction
>> 20) & 0xf)
10040 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10041 inst
.operands
[2].imm
= 16;
10042 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10048 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10049 inst
.operands
[2].imm
= 32;
10050 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10057 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10059 wrn
= (inst
.instruction
>> 16) & 0xf;
10060 inst
.instruction
&= 0xff0fff0f;
10061 inst
.instruction
|= wrn
;
10062 /* Bail out here; the instruction is now assembled. */
10067 /* Map 32 -> 0, etc. */
10068 inst
.operands
[2].imm
&= 0x1f;
10069 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10073 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10074 operations first, then control, shift, and load/store. */
10076 /* Insns like "foo X,Y,Z". */
10079 do_mav_triple (void)
10081 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10082 inst
.instruction
|= inst
.operands
[1].reg
;
10083 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10086 /* Insns like "foo W,X,Y,Z".
10087 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10092 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10093 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10094 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10095 inst
.instruction
|= inst
.operands
[3].reg
;
10098 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10100 do_mav_dspsc (void)
10102 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10105 /* Maverick shift immediate instructions.
10106 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10107 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10110 do_mav_shift (void)
10112 int imm
= inst
.operands
[2].imm
;
10114 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10115 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10117 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10118 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10119 Bit 4 should be 0. */
10120 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10122 inst
.instruction
|= imm
;
10125 /* XScale instructions. Also sorted arithmetic before move. */
10127 /* Xscale multiply-accumulate (argument parse)
10130 MIAxycc acc0,Rm,Rs. */
10135 inst
.instruction
|= inst
.operands
[1].reg
;
10136 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10139 /* Xscale move-accumulator-register (argument parse)
10141 MARcc acc0,RdLo,RdHi. */
10146 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10147 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10150 /* Xscale move-register-accumulator (argument parse)
10152 MRAcc RdLo,RdHi,acc0. */
10157 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10158 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10159 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10162 /* Encoding functions relevant only to Thumb. */
10164 /* inst.operands[i] is a shifted-register operand; encode
10165 it into inst.instruction in the format used by Thumb32. */
10168 encode_thumb32_shifted_operand (int i
)
10170 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10171 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10173 constraint (inst
.operands
[i
].immisreg
,
10174 _("shift by register not allowed in thumb mode"));
10175 inst
.instruction
|= inst
.operands
[i
].reg
;
10176 if (shift
== SHIFT_RRX
)
10177 inst
.instruction
|= SHIFT_ROR
<< 4;
10180 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10181 _("expression too complex"));
10183 constraint (value
> 32
10184 || (value
== 32 && (shift
== SHIFT_LSL
10185 || shift
== SHIFT_ROR
)),
10186 _("shift expression is too large"));
10190 else if (value
== 32)
10193 inst
.instruction
|= shift
<< 4;
10194 inst
.instruction
|= (value
& 0x1c) << 10;
10195 inst
.instruction
|= (value
& 0x03) << 6;
10200 /* inst.operands[i] was set up by parse_address. Encode it into a
10201 Thumb32 format load or store instruction. Reject forms that cannot
10202 be used with such instructions. If is_t is true, reject forms that
10203 cannot be used with a T instruction; if is_d is true, reject forms
10204 that cannot be used with a D instruction. If it is a store insn,
10205 reject PC in Rn. */
10208 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10210 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10212 constraint (!inst
.operands
[i
].isreg
,
10213 _("Instruction does not support =N addresses"));
10215 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10216 if (inst
.operands
[i
].immisreg
)
10218 constraint (is_pc
, BAD_PC_ADDRESSING
);
10219 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10220 constraint (inst
.operands
[i
].negative
,
10221 _("Thumb does not support negative register indexing"));
10222 constraint (inst
.operands
[i
].postind
,
10223 _("Thumb does not support register post-indexing"));
10224 constraint (inst
.operands
[i
].writeback
,
10225 _("Thumb does not support register indexing with writeback"));
10226 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10227 _("Thumb supports only LSL in shifted register indexing"));
10229 inst
.instruction
|= inst
.operands
[i
].imm
;
10230 if (inst
.operands
[i
].shifted
)
10232 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10233 _("expression too complex"));
10234 constraint (inst
.reloc
.exp
.X_add_number
< 0
10235 || inst
.reloc
.exp
.X_add_number
> 3,
10236 _("shift out of range"));
10237 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10239 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10241 else if (inst
.operands
[i
].preind
)
10243 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10244 constraint (is_t
&& inst
.operands
[i
].writeback
,
10245 _("cannot use writeback with this instruction"));
10246 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10247 BAD_PC_ADDRESSING
);
10251 inst
.instruction
|= 0x01000000;
10252 if (inst
.operands
[i
].writeback
)
10253 inst
.instruction
|= 0x00200000;
10257 inst
.instruction
|= 0x00000c00;
10258 if (inst
.operands
[i
].writeback
)
10259 inst
.instruction
|= 0x00000100;
10261 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10263 else if (inst
.operands
[i
].postind
)
10265 gas_assert (inst
.operands
[i
].writeback
);
10266 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10267 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10270 inst
.instruction
|= 0x00200000;
10272 inst
.instruction
|= 0x00000900;
10273 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10275 else /* unindexed - only for coprocessor */
10276 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                    \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10385 /* Thumb instruction encoders, in alphabetical order. */
10387 /* ADDW or SUBW. */
10390 do_t_add_sub_w (void)
10394 Rd
= inst
.operands
[0].reg
;
10395 Rn
= inst
.operands
[1].reg
;
10397 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10398 is the SP-{plus,minus}-immediate form of the instruction. */
10400 constraint (Rd
== REG_PC
, BAD_PC
);
10402 reject_bad_reg (Rd
);
10404 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10405 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10408 /* Parse an add or subtract instruction. We get here with inst.instruction
10409 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10412 do_t_add_sub (void)
10416 Rd
= inst
.operands
[0].reg
;
10417 Rs
= (inst
.operands
[1].present
10418 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10419 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10422 set_it_insn_type_last ();
10424 if (unified_syntax
)
10427 bfd_boolean narrow
;
10430 flags
= (inst
.instruction
== T_MNEM_adds
10431 || inst
.instruction
== T_MNEM_subs
);
10433 narrow
= !in_it_block ();
10435 narrow
= in_it_block ();
10436 if (!inst
.operands
[2].isreg
)
10440 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10442 add
= (inst
.instruction
== T_MNEM_add
10443 || inst
.instruction
== T_MNEM_adds
);
10445 if (inst
.size_req
!= 4)
10447 /* Attempt to use a narrow opcode, with relaxation if
10449 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10450 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10451 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10452 opcode
= T_MNEM_add_sp
;
10453 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10454 opcode
= T_MNEM_add_pc
;
10455 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10458 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10460 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10464 inst
.instruction
= THUMB_OP16(opcode
);
10465 inst
.instruction
|= (Rd
<< 4) | Rs
;
10466 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10467 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10468 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10469 if (inst
.size_req
!= 2)
10470 inst
.relax
= opcode
;
10473 constraint (inst
.size_req
== 2, BAD_HIREG
);
10475 if (inst
.size_req
== 4
10476 || (inst
.size_req
!= 2 && !opcode
))
10480 constraint (add
, BAD_PC
);
10481 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10482 _("only SUBS PC, LR, #const allowed"));
10483 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10484 _("expression too complex"));
10485 constraint (inst
.reloc
.exp
.X_add_number
< 0
10486 || inst
.reloc
.exp
.X_add_number
> 0xff,
10487 _("immediate value out of range"));
10488 inst
.instruction
= T2_SUBS_PC_LR
10489 | inst
.reloc
.exp
.X_add_number
;
10490 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10493 else if (Rs
== REG_PC
)
10495 /* Always use addw/subw. */
10496 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10497 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10501 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10502 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10505 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10507 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10509 inst
.instruction
|= Rd
<< 8;
10510 inst
.instruction
|= Rs
<< 16;
10515 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10516 unsigned int shift
= inst
.operands
[2].shift_kind
;
10518 Rn
= inst
.operands
[2].reg
;
10519 /* See if we can do this with a 16-bit instruction. */
10520 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10522 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10527 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10528 || inst
.instruction
== T_MNEM_add
)
10530 : T_OPCODE_SUB_R3
);
10531 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10535 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10537 /* Thumb-1 cores (except v6-M) require at least one high
10538 register in a narrow non flag setting add. */
10539 if (Rd
> 7 || Rn
> 7
10540 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10541 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10548 inst
.instruction
= T_OPCODE_ADD_HI
;
10549 inst
.instruction
|= (Rd
& 8) << 4;
10550 inst
.instruction
|= (Rd
& 7);
10551 inst
.instruction
|= Rn
<< 3;
10557 constraint (Rd
== REG_PC
, BAD_PC
);
10558 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10559 constraint (Rs
== REG_PC
, BAD_PC
);
10560 reject_bad_reg (Rn
);
10562 /* If we get here, it can't be done in 16 bits. */
10563 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10564 _("shift must be constant"));
10565 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10566 inst
.instruction
|= Rd
<< 8;
10567 inst
.instruction
|= Rs
<< 16;
10568 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10569 _("shift value over 3 not allowed in thumb mode"));
10570 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10571 _("only LSL shift allowed in thumb mode"));
10572 encode_thumb32_shifted_operand (2);
10577 constraint (inst
.instruction
== T_MNEM_adds
10578 || inst
.instruction
== T_MNEM_subs
,
10581 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10583 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10584 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10587 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10588 ? 0x0000 : 0x8000);
10589 inst
.instruction
|= (Rd
<< 4) | Rs
;
10590 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10594 Rn
= inst
.operands
[2].reg
;
10595 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10597 /* We now have Rd, Rs, and Rn set to registers. */
10598 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10600 /* Can't do this for SUB. */
10601 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10602 inst
.instruction
= T_OPCODE_ADD_HI
;
10603 inst
.instruction
|= (Rd
& 8) << 4;
10604 inst
.instruction
|= (Rd
& 7);
10606 inst
.instruction
|= Rn
<< 3;
10608 inst
.instruction
|= Rs
<< 3;
10610 constraint (1, _("dest must overlap one source register"));
10614 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10615 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10616 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10626 Rd
= inst
.operands
[0].reg
;
10627 reject_bad_reg (Rd
);
10629 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10631 /* Defer to section relaxation. */
10632 inst
.relax
= inst
.instruction
;
10633 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10634 inst
.instruction
|= Rd
<< 4;
10636 else if (unified_syntax
&& inst
.size_req
!= 2)
10638 /* Generate a 32-bit opcode. */
10639 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10640 inst
.instruction
|= Rd
<< 8;
10641 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10642 inst
.reloc
.pc_rel
= 1;
10646 /* Generate a 16-bit opcode. */
10647 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10648 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10649 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10650 inst
.reloc
.pc_rel
= 1;
10652 inst
.instruction
|= Rd
<< 4;
10656 /* Arithmetic instructions for which there is just one 16-bit
10657 instruction encoding, and it allows only two low registers.
10658 For maximal compatibility with ARM syntax, we allow three register
10659 operands even when Thumb-32 instructions are not available, as long
10660 as the first two are identical. For instance, both "sbc r0,r1" and
10661 "sbc r0,r0,r1" are allowed. */
10667 Rd
= inst
.operands
[0].reg
;
10668 Rs
= (inst
.operands
[1].present
10669 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10670 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10671 Rn
= inst
.operands
[2].reg
;
10673 reject_bad_reg (Rd
);
10674 reject_bad_reg (Rs
);
10675 if (inst
.operands
[2].isreg
)
10676 reject_bad_reg (Rn
);
10678 if (unified_syntax
)
10680 if (!inst
.operands
[2].isreg
)
10682 /* For an immediate, we always generate a 32-bit opcode;
10683 section relaxation will shrink it later if possible. */
10684 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10685 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10686 inst
.instruction
|= Rd
<< 8;
10687 inst
.instruction
|= Rs
<< 16;
10688 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10692 bfd_boolean narrow
;
10694 /* See if we can do this with a 16-bit instruction. */
10695 if (THUMB_SETS_FLAGS (inst
.instruction
))
10696 narrow
= !in_it_block ();
10698 narrow
= in_it_block ();
10700 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10702 if (inst
.operands
[2].shifted
)
10704 if (inst
.size_req
== 4)
10710 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10711 inst
.instruction
|= Rd
;
10712 inst
.instruction
|= Rn
<< 3;
10716 /* If we get here, it can't be done in 16 bits. */
10717 constraint (inst
.operands
[2].shifted
10718 && inst
.operands
[2].immisreg
,
10719 _("shift must be constant"));
10720 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10721 inst
.instruction
|= Rd
<< 8;
10722 inst
.instruction
|= Rs
<< 16;
10723 encode_thumb32_shifted_operand (2);
10728 /* On its face this is a lie - the instruction does set the
10729 flags. However, the only supported mnemonic in this mode
10730 says it doesn't. */
10731 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10733 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10734 _("unshifted register required"));
10735 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10736 constraint (Rd
!= Rs
,
10737 _("dest and source1 must be the same register"));
10739 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10740 inst
.instruction
|= Rd
;
10741 inst
.instruction
|= Rn
<< 3;
10745 /* Similarly, but for instructions where the arithmetic operation is
10746 commutative, so we can allow either of them to be different from
10747 the destination operand in a 16-bit instruction. For instance, all
10748 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10755 Rd
= inst
.operands
[0].reg
;
10756 Rs
= (inst
.operands
[1].present
10757 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10758 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10759 Rn
= inst
.operands
[2].reg
;
10761 reject_bad_reg (Rd
);
10762 reject_bad_reg (Rs
);
10763 if (inst
.operands
[2].isreg
)
10764 reject_bad_reg (Rn
);
10766 if (unified_syntax
)
10768 if (!inst
.operands
[2].isreg
)
10770 /* For an immediate, we always generate a 32-bit opcode;
10771 section relaxation will shrink it later if possible. */
10772 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10773 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10774 inst
.instruction
|= Rd
<< 8;
10775 inst
.instruction
|= Rs
<< 16;
10776 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10780 bfd_boolean narrow
;
10782 /* See if we can do this with a 16-bit instruction. */
10783 if (THUMB_SETS_FLAGS (inst
.instruction
))
10784 narrow
= !in_it_block ();
10786 narrow
= in_it_block ();
10788 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10790 if (inst
.operands
[2].shifted
)
10792 if (inst
.size_req
== 4)
10799 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10800 inst
.instruction
|= Rd
;
10801 inst
.instruction
|= Rn
<< 3;
10806 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10807 inst
.instruction
|= Rd
;
10808 inst
.instruction
|= Rs
<< 3;
10813 /* If we get here, it can't be done in 16 bits. */
10814 constraint (inst
.operands
[2].shifted
10815 && inst
.operands
[2].immisreg
,
10816 _("shift must be constant"));
10817 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10818 inst
.instruction
|= Rd
<< 8;
10819 inst
.instruction
|= Rs
<< 16;
10820 encode_thumb32_shifted_operand (2);
10825 /* On its face this is a lie - the instruction does set the
10826 flags. However, the only supported mnemonic in this mode
10827 says it doesn't. */
10828 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10830 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10831 _("unshifted register required"));
10832 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10834 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10835 inst
.instruction
|= Rd
;
10838 inst
.instruction
|= Rn
<< 3;
10840 inst
.instruction
|= Rs
<< 3;
10842 constraint (1, _("dest must overlap one source register"));
10850 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10851 constraint (msb
> 32, _("bit-field extends past end of register"));
10852 /* The instruction encoding stores the LSB and MSB,
10853 not the LSB and width. */
10854 Rd
= inst
.operands
[0].reg
;
10855 reject_bad_reg (Rd
);
10856 inst
.instruction
|= Rd
<< 8;
10857 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10858 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10859 inst
.instruction
|= msb
- 1;
10868 Rd
= inst
.operands
[0].reg
;
10869 reject_bad_reg (Rd
);
10871 /* #0 in second position is alternative syntax for bfc, which is
10872 the same instruction but with REG_PC in the Rm field. */
10873 if (!inst
.operands
[1].isreg
)
10877 Rn
= inst
.operands
[1].reg
;
10878 reject_bad_reg (Rn
);
10881 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10882 constraint (msb
> 32, _("bit-field extends past end of register"));
10883 /* The instruction encoding stores the LSB and MSB,
10884 not the LSB and width. */
10885 inst
.instruction
|= Rd
<< 8;
10886 inst
.instruction
|= Rn
<< 16;
10887 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10888 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10889 inst
.instruction
|= msb
- 1;
10897 Rd
= inst
.operands
[0].reg
;
10898 Rn
= inst
.operands
[1].reg
;
10900 reject_bad_reg (Rd
);
10901 reject_bad_reg (Rn
);
10903 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10904 _("bit-field extends past end of register"));
10905 inst
.instruction
|= Rd
<< 8;
10906 inst
.instruction
|= Rn
<< 16;
10907 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10908 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10909 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10912 /* ARM V5 Thumb BLX (argument parse)
10913 BLX <target_addr> which is BLX(1)
10914 BLX <Rm> which is BLX(2)
10915 Unfortunately, there are two different opcodes for this mnemonic.
10916 So, the insns[].value is not used, and the code here zaps values
10917 into inst.instruction.
10919 ??? How to take advantage of the additional two bits of displacement
10920 available in Thumb32 mode? Need new relocation? */
10925 set_it_insn_type_last ();
10927 if (inst
.operands
[0].isreg
)
10929 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10930 /* We have a register, so this is BLX(2). */
10931 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10935 /* No register. This must be BLX(1). */
10936 inst
.instruction
= 0xf000e800;
10937 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
10949 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
10951 if (in_it_block ())
10953 /* Conditional branches inside IT blocks are encoded as unconditional
10955 cond
= COND_ALWAYS
;
10960 if (cond
!= COND_ALWAYS
)
10961 opcode
= T_MNEM_bcond
;
10963 opcode
= inst
.instruction
;
10966 && (inst
.size_req
== 4
10967 || (inst
.size_req
!= 2
10968 && (inst
.operands
[0].hasreloc
10969 || inst
.reloc
.exp
.X_op
== O_constant
))))
10971 inst
.instruction
= THUMB_OP32(opcode
);
10972 if (cond
== COND_ALWAYS
)
10973 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
10976 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
10977 _("selected architecture does not support "
10978 "wide conditional branch instruction"));
10980 gas_assert (cond
!= 0xF);
10981 inst
.instruction
|= cond
<< 22;
10982 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
10987 inst
.instruction
= THUMB_OP16(opcode
);
10988 if (cond
== COND_ALWAYS
)
10989 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
10992 inst
.instruction
|= cond
<< 8;
10993 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
10995 /* Allow section relaxation. */
10996 if (unified_syntax
&& inst
.size_req
!= 2)
10997 inst
.relax
= opcode
;
10999 inst
.reloc
.type
= reloc
;
11000 inst
.reloc
.pc_rel
= 1;
11003 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11004 between the two is the maximum immediate allowed - which is passed in
11007 do_t_bkpt_hlt1 (int range
)
11009 constraint (inst
.cond
!= COND_ALWAYS
,
11010 _("instruction is always unconditional"));
11011 if (inst
.operands
[0].present
)
11013 constraint (inst
.operands
[0].imm
> range
,
11014 _("immediate value out of range"));
11015 inst
.instruction
|= inst
.operands
[0].imm
;
11018 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 6 bits.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 8 bits.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11034 do_t_branch23 (void)
11036 set_it_insn_type_last ();
11037 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11039 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11040 this file. We used to simply ignore the PLT reloc type here --
11041 the branch encoding is now needed to deal with TLSCALL relocs.
11042 So if we see a PLT reloc now, put it back to how it used to be to
11043 keep the preexisting behaviour. */
11044 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11045 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11047 #if defined(OBJ_COFF)
11048 /* If the destination of the branch is a defined symbol which does not have
11049 the THUMB_FUNC attribute, then we must be calling a function which has
11050 the (interfacearm) attribute. We look for the Thumb entry point to that
11051 function and change the branch to refer to that function instead. */
11052 if ( inst
.reloc
.exp
.X_op
== O_symbol
11053 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11054 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11055 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11056 inst
.reloc
.exp
.X_add_symbol
=
11057 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11064 set_it_insn_type_last ();
11065 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11066 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11067 should cause the alignment to be checked once it is known. This is
11068 because BX PC only works if the instruction is word aligned. */
11076 set_it_insn_type_last ();
11077 Rm
= inst
.operands
[0].reg
;
11078 reject_bad_reg (Rm
);
11079 inst
.instruction
|= Rm
<< 16;
11088 Rd
= inst
.operands
[0].reg
;
11089 Rm
= inst
.operands
[1].reg
;
11091 reject_bad_reg (Rd
);
11092 reject_bad_reg (Rm
);
11094 inst
.instruction
|= Rd
<< 8;
11095 inst
.instruction
|= Rm
<< 16;
11096 inst
.instruction
|= Rm
;
11102 set_it_insn_type (OUTSIDE_IT_INSN
);
11103 inst
.instruction
|= inst
.operands
[0].imm
;
11109 set_it_insn_type (OUTSIDE_IT_INSN
);
11111 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11112 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11114 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11115 inst
.instruction
= 0xf3af8000;
11116 inst
.instruction
|= imod
<< 9;
11117 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11118 if (inst
.operands
[1].present
)
11119 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11123 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11124 && (inst
.operands
[0].imm
& 4),
11125 _("selected processor does not support 'A' form "
11126 "of this instruction"));
11127 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11128 _("Thumb does not support the 2-argument "
11129 "form of this instruction"));
11130 inst
.instruction
|= inst
.operands
[0].imm
;
11134 /* THUMB CPY instruction (argument parse). */
11139 if (inst
.size_req
== 4)
11141 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11142 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11143 inst
.instruction
|= inst
.operands
[1].reg
;
11147 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11148 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11149 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11156 set_it_insn_type (OUTSIDE_IT_INSN
);
11157 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11158 inst
.instruction
|= inst
.operands
[0].reg
;
11159 inst
.reloc
.pc_rel
= 1;
11160 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11166 inst
.instruction
|= inst
.operands
[0].imm
;
11172 unsigned Rd
, Rn
, Rm
;
11174 Rd
= inst
.operands
[0].reg
;
11175 Rn
= (inst
.operands
[1].present
11176 ? inst
.operands
[1].reg
: Rd
);
11177 Rm
= inst
.operands
[2].reg
;
11179 reject_bad_reg (Rd
);
11180 reject_bad_reg (Rn
);
11181 reject_bad_reg (Rm
);
11183 inst
.instruction
|= Rd
<< 8;
11184 inst
.instruction
|= Rn
<< 16;
11185 inst
.instruction
|= Rm
;
11191 if (unified_syntax
&& inst
.size_req
== 4)
11192 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11194 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11200 unsigned int cond
= inst
.operands
[0].imm
;
11202 set_it_insn_type (IT_INSN
);
11203 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11205 now_it
.warn_deprecated
= FALSE
;
11207 /* If the condition is a negative condition, invert the mask. */
11208 if ((cond
& 0x1) == 0x0)
11210 unsigned int mask
= inst
.instruction
& 0x000f;
11212 if ((mask
& 0x7) == 0)
11214 /* No conversion needed. */
11215 now_it
.block_length
= 1;
11217 else if ((mask
& 0x3) == 0)
11220 now_it
.block_length
= 2;
11222 else if ((mask
& 0x1) == 0)
11225 now_it
.block_length
= 3;
11230 now_it
.block_length
= 4;
11233 inst
.instruction
&= 0xfff0;
11234 inst
.instruction
|= mask
;
11237 inst
.instruction
|= cond
<< 4;
11240 /* Helper function used for both push/pop and ldm/stm. */
11242 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11246 load
= (inst
.instruction
& (1 << 20)) != 0;
11248 if (mask
& (1 << 13))
11249 inst
.error
= _("SP not allowed in register list");
11251 if ((mask
& (1 << base
)) != 0
11253 inst
.error
= _("having the base register in the register list when "
11254 "using write back is UNPREDICTABLE");
11258 if (mask
& (1 << 15))
11260 if (mask
& (1 << 14))
11261 inst
.error
= _("LR and PC should not both be in register list");
11263 set_it_insn_type_last ();
11268 if (mask
& (1 << 15))
11269 inst
.error
= _("PC not allowed in register list");
11272 if ((mask
& (mask
- 1)) == 0)
11274 /* Single register transfers implemented as str/ldr. */
11277 if (inst
.instruction
& (1 << 23))
11278 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11280 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11284 if (inst
.instruction
& (1 << 23))
11285 inst
.instruction
= 0x00800000; /* ia -> [base] */
11287 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11290 inst
.instruction
|= 0xf8400000;
11292 inst
.instruction
|= 0x00100000;
11294 mask
= ffs (mask
) - 1;
11297 else if (writeback
)
11298 inst
.instruction
|= WRITE_BACK
;
11300 inst
.instruction
|= mask
;
11301 inst
.instruction
|= base
<< 16;
11307 /* This really doesn't seem worth it. */
11308 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11309 _("expression too complex"));
11310 constraint (inst
.operands
[1].writeback
,
11311 _("Thumb load/store multiple does not support {reglist}^"));
11313 if (unified_syntax
)
11315 bfd_boolean narrow
;
11319 /* See if we can use a 16-bit instruction. */
11320 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11321 && inst
.size_req
!= 4
11322 && !(inst
.operands
[1].imm
& ~0xff))
11324 mask
= 1 << inst
.operands
[0].reg
;
11326 if (inst
.operands
[0].reg
<= 7)
11328 if (inst
.instruction
== T_MNEM_stmia
11329 ? inst
.operands
[0].writeback
11330 : (inst
.operands
[0].writeback
11331 == !(inst
.operands
[1].imm
& mask
)))
11333 if (inst
.instruction
== T_MNEM_stmia
11334 && (inst
.operands
[1].imm
& mask
)
11335 && (inst
.operands
[1].imm
& (mask
- 1)))
11336 as_warn (_("value stored for r%d is UNKNOWN"),
11337 inst
.operands
[0].reg
);
11339 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11340 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11341 inst
.instruction
|= inst
.operands
[1].imm
;
11344 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11346 /* This means 1 register in reg list one of 3 situations:
11347 1. Instruction is stmia, but without writeback.
11348 2. lmdia without writeback, but with Rn not in
11350 3. ldmia with writeback, but with Rn in reglist.
11351 Case 3 is UNPREDICTABLE behaviour, so we handle
11352 case 1 and 2 which can be converted into a 16-bit
11353 str or ldr. The SP cases are handled below. */
11354 unsigned long opcode
;
11355 /* First, record an error for Case 3. */
11356 if (inst
.operands
[1].imm
& mask
11357 && inst
.operands
[0].writeback
)
11359 _("having the base register in the register list when "
11360 "using write back is UNPREDICTABLE");
11362 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11364 inst
.instruction
= THUMB_OP16 (opcode
);
11365 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11366 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11370 else if (inst
.operands
[0] .reg
== REG_SP
)
11372 if (inst
.operands
[0].writeback
)
11375 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11376 ? T_MNEM_push
: T_MNEM_pop
);
11377 inst
.instruction
|= inst
.operands
[1].imm
;
11380 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11383 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11384 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11385 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11393 if (inst
.instruction
< 0xffff)
11394 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11396 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11397 inst
.operands
[0].writeback
);
11402 constraint (inst
.operands
[0].reg
> 7
11403 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11404 constraint (inst
.instruction
!= T_MNEM_ldmia
11405 && inst
.instruction
!= T_MNEM_stmia
,
11406 _("Thumb-2 instruction only valid in unified syntax"));
11407 if (inst
.instruction
== T_MNEM_stmia
)
11409 if (!inst
.operands
[0].writeback
)
11410 as_warn (_("this instruction will write back the base register"));
11411 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11412 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11413 as_warn (_("value stored for r%d is UNKNOWN"),
11414 inst
.operands
[0].reg
);
11418 if (!inst
.operands
[0].writeback
11419 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11420 as_warn (_("this instruction will write back the base register"));
11421 else if (inst
.operands
[0].writeback
11422 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11423 as_warn (_("this instruction will not write back the base register"));
11426 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11427 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11428 inst
.instruction
|= inst
.operands
[1].imm
;
11435 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11436 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11437 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11438 || inst
.operands
[1].negative
,
11441 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11443 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11444 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11445 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11451 if (!inst
.operands
[1].present
)
11453 constraint (inst
.operands
[0].reg
== REG_LR
,
11454 _("r14 not allowed as first register "
11455 "when second register is omitted"));
11456 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11458 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11461 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11462 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11463 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11469 unsigned long opcode
;
11472 if (inst
.operands
[0].isreg
11473 && !inst
.operands
[0].preind
11474 && inst
.operands
[0].reg
== REG_PC
)
11475 set_it_insn_type_last ();
11477 opcode
= inst
.instruction
;
11478 if (unified_syntax
)
11480 if (!inst
.operands
[1].isreg
)
11482 if (opcode
<= 0xffff)
11483 inst
.instruction
= THUMB_OP32 (opcode
);
11484 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11487 if (inst
.operands
[1].isreg
11488 && !inst
.operands
[1].writeback
11489 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11490 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11491 && opcode
<= 0xffff
11492 && inst
.size_req
!= 4)
11494 /* Insn may have a 16-bit form. */
11495 Rn
= inst
.operands
[1].reg
;
11496 if (inst
.operands
[1].immisreg
)
11498 inst
.instruction
= THUMB_OP16 (opcode
);
11500 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11502 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11503 reject_bad_reg (inst
.operands
[1].imm
);
11505 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11506 && opcode
!= T_MNEM_ldrsb
)
11507 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11508 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11515 if (inst
.reloc
.pc_rel
)
11516 opcode
= T_MNEM_ldr_pc2
;
11518 opcode
= T_MNEM_ldr_pc
;
11522 if (opcode
== T_MNEM_ldr
)
11523 opcode
= T_MNEM_ldr_sp
;
11525 opcode
= T_MNEM_str_sp
;
11527 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11531 inst
.instruction
= inst
.operands
[0].reg
;
11532 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11534 inst
.instruction
|= THUMB_OP16 (opcode
);
11535 if (inst
.size_req
== 2)
11536 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11538 inst
.relax
= opcode
;
11542 /* Definitely a 32-bit variant. */
11544 /* Warning for Erratum 752419. */
11545 if (opcode
== T_MNEM_ldr
11546 && inst
.operands
[0].reg
== REG_SP
11547 && inst
.operands
[1].writeback
== 1
11548 && !inst
.operands
[1].immisreg
)
11550 if (no_cpu_selected ()
11551 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11552 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11553 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11554 as_warn (_("This instruction may be unpredictable "
11555 "if executed on M-profile cores "
11556 "with interrupts enabled."));
11559 /* Do some validations regarding addressing modes. */
11560 if (inst
.operands
[1].immisreg
)
11561 reject_bad_reg (inst
.operands
[1].imm
);
11563 constraint (inst
.operands
[1].writeback
== 1
11564 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11567 inst
.instruction
= THUMB_OP32 (opcode
);
11568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11569 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11570 check_ldr_r15_aligned ();
11574 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11576 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11578 /* Only [Rn,Rm] is acceptable. */
11579 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11580 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11581 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11582 || inst
.operands
[1].negative
,
11583 _("Thumb does not support this addressing mode"));
11584 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11588 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11589 if (!inst
.operands
[1].isreg
)
11590 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11593 constraint (!inst
.operands
[1].preind
11594 || inst
.operands
[1].shifted
11595 || inst
.operands
[1].writeback
,
11596 _("Thumb does not support this addressing mode"));
11597 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11599 constraint (inst
.instruction
& 0x0600,
11600 _("byte or halfword not valid for base register"));
11601 constraint (inst
.operands
[1].reg
== REG_PC
11602 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11603 _("r15 based store not allowed"));
11604 constraint (inst
.operands
[1].immisreg
,
11605 _("invalid base register for register offset"));
11607 if (inst
.operands
[1].reg
== REG_PC
)
11608 inst
.instruction
= T_OPCODE_LDR_PC
;
11609 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11610 inst
.instruction
= T_OPCODE_LDR_SP
;
11612 inst
.instruction
= T_OPCODE_STR_SP
;
11614 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11615 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11619 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11620 if (!inst
.operands
[1].immisreg
)
11622 /* Immediate offset. */
11623 inst
.instruction
|= inst
.operands
[0].reg
;
11624 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11625 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11629 /* Register offset. */
11630 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11631 constraint (inst
.operands
[1].negative
,
11632 _("Thumb does not support this addressing mode"));
11635 switch (inst
.instruction
)
11637 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11638 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11639 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11640 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11641 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11642 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11643 case 0x5600 /* ldrsb */:
11644 case 0x5e00 /* ldrsh */: break;
11648 inst
.instruction
|= inst
.operands
[0].reg
;
11649 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11650 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11656 if (!inst
.operands
[1].present
)
11658 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11659 constraint (inst
.operands
[0].reg
== REG_LR
,
11660 _("r14 not allowed here"));
11661 constraint (inst
.operands
[0].reg
== REG_R12
,
11662 _("r12 not allowed here"));
11665 if (inst
.operands
[2].writeback
11666 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11667 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11668 as_warn (_("base register written back, and overlaps "
11669 "one of transfer registers"));
11671 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11672 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11673 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11679 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11680 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11686 unsigned Rd
, Rn
, Rm
, Ra
;
11688 Rd
= inst
.operands
[0].reg
;
11689 Rn
= inst
.operands
[1].reg
;
11690 Rm
= inst
.operands
[2].reg
;
11691 Ra
= inst
.operands
[3].reg
;
11693 reject_bad_reg (Rd
);
11694 reject_bad_reg (Rn
);
11695 reject_bad_reg (Rm
);
11696 reject_bad_reg (Ra
);
11698 inst
.instruction
|= Rd
<< 8;
11699 inst
.instruction
|= Rn
<< 16;
11700 inst
.instruction
|= Rm
;
11701 inst
.instruction
|= Ra
<< 12;
11707 unsigned RdLo
, RdHi
, Rn
, Rm
;
11709 RdLo
= inst
.operands
[0].reg
;
11710 RdHi
= inst
.operands
[1].reg
;
11711 Rn
= inst
.operands
[2].reg
;
11712 Rm
= inst
.operands
[3].reg
;
11714 reject_bad_reg (RdLo
);
11715 reject_bad_reg (RdHi
);
11716 reject_bad_reg (Rn
);
11717 reject_bad_reg (Rm
);
11719 inst
.instruction
|= RdLo
<< 12;
11720 inst
.instruction
|= RdHi
<< 8;
11721 inst
.instruction
|= Rn
<< 16;
11722 inst
.instruction
|= Rm
;
11726 do_t_mov_cmp (void)
11730 Rn
= inst
.operands
[0].reg
;
11731 Rm
= inst
.operands
[1].reg
;
11734 set_it_insn_type_last ();
11736 if (unified_syntax
)
11738 int r0off
= (inst
.instruction
== T_MNEM_mov
11739 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11740 unsigned long opcode
;
11741 bfd_boolean narrow
;
11742 bfd_boolean low_regs
;
11744 low_regs
= (Rn
<= 7 && Rm
<= 7);
11745 opcode
= inst
.instruction
;
11746 if (in_it_block ())
11747 narrow
= opcode
!= T_MNEM_movs
;
11749 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11750 if (inst
.size_req
== 4
11751 || inst
.operands
[1].shifted
)
11754 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11755 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11756 && !inst
.operands
[1].shifted
11760 inst
.instruction
= T2_SUBS_PC_LR
;
11764 if (opcode
== T_MNEM_cmp
)
11766 constraint (Rn
== REG_PC
, BAD_PC
);
11769 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11771 warn_deprecated_sp (Rm
);
11772 /* R15 was documented as a valid choice for Rm in ARMv6,
11773 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11774 tools reject R15, so we do too. */
11775 constraint (Rm
== REG_PC
, BAD_PC
);
11778 reject_bad_reg (Rm
);
11780 else if (opcode
== T_MNEM_mov
11781 || opcode
== T_MNEM_movs
)
11783 if (inst
.operands
[1].isreg
)
11785 if (opcode
== T_MNEM_movs
)
11787 reject_bad_reg (Rn
);
11788 reject_bad_reg (Rm
);
11792 /* This is mov.n. */
11793 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11794 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11796 as_tsktsk (_("Use of r%u as a source register is "
11797 "deprecated when r%u is the destination "
11798 "register."), Rm
, Rn
);
11803 /* This is mov.w. */
11804 constraint (Rn
== REG_PC
, BAD_PC
);
11805 constraint (Rm
== REG_PC
, BAD_PC
);
11806 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11810 reject_bad_reg (Rn
);
11813 if (!inst
.operands
[1].isreg
)
11815 /* Immediate operand. */
11816 if (!in_it_block () && opcode
== T_MNEM_mov
)
11818 if (low_regs
&& narrow
)
11820 inst
.instruction
= THUMB_OP16 (opcode
);
11821 inst
.instruction
|= Rn
<< 8;
11822 if (inst
.size_req
== 2)
11824 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11825 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11826 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11829 inst
.relax
= opcode
;
11833 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11834 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11835 inst
.instruction
|= Rn
<< r0off
;
11836 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11839 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11840 && (inst
.instruction
== T_MNEM_mov
11841 || inst
.instruction
== T_MNEM_movs
))
11843 /* Register shifts are encoded as separate shift instructions. */
11844 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11846 if (in_it_block ())
11851 if (inst
.size_req
== 4)
11854 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11860 switch (inst
.operands
[1].shift_kind
)
11863 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11866 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11869 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11872 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11878 inst
.instruction
= opcode
;
11881 inst
.instruction
|= Rn
;
11882 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11887 inst
.instruction
|= CONDS_BIT
;
11889 inst
.instruction
|= Rn
<< 8;
11890 inst
.instruction
|= Rm
<< 16;
11891 inst
.instruction
|= inst
.operands
[1].imm
;
11896 /* Some mov with immediate shift have narrow variants.
11897 Register shifts are handled above. */
11898 if (low_regs
&& inst
.operands
[1].shifted
11899 && (inst
.instruction
== T_MNEM_mov
11900 || inst
.instruction
== T_MNEM_movs
))
11902 if (in_it_block ())
11903 narrow
= (inst
.instruction
== T_MNEM_mov
);
11905 narrow
= (inst
.instruction
== T_MNEM_movs
);
11910 switch (inst
.operands
[1].shift_kind
)
11912 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11913 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11914 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11915 default: narrow
= FALSE
; break;
11921 inst
.instruction
|= Rn
;
11922 inst
.instruction
|= Rm
<< 3;
11923 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11927 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11928 inst
.instruction
|= Rn
<< r0off
;
11929 encode_thumb32_shifted_operand (1);
11933 switch (inst
.instruction
)
11936 /* In v4t or v5t a move of two lowregs produces unpredictable
11937 results. Don't allow this. */
11940 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
11941 "MOV Rd, Rs with two low registers is not "
11942 "permitted on this architecture");
11943 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
11947 inst
.instruction
= T_OPCODE_MOV_HR
;
11948 inst
.instruction
|= (Rn
& 0x8) << 4;
11949 inst
.instruction
|= (Rn
& 0x7);
11950 inst
.instruction
|= Rm
<< 3;
11954 /* We know we have low registers at this point.
11955 Generate LSLS Rd, Rs, #0. */
11956 inst
.instruction
= T_OPCODE_LSL_I
;
11957 inst
.instruction
|= Rn
;
11958 inst
.instruction
|= Rm
<< 3;
11964 inst
.instruction
= T_OPCODE_CMP_LR
;
11965 inst
.instruction
|= Rn
;
11966 inst
.instruction
|= Rm
<< 3;
11970 inst
.instruction
= T_OPCODE_CMP_HR
;
11971 inst
.instruction
|= (Rn
& 0x8) << 4;
11972 inst
.instruction
|= (Rn
& 0x7);
11973 inst
.instruction
|= Rm
<< 3;
11980 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11982 /* PR 10443: Do not silently ignore shifted operands. */
11983 constraint (inst
.operands
[1].shifted
,
11984 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11986 if (inst
.operands
[1].isreg
)
11988 if (Rn
< 8 && Rm
< 8)
11990 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11991 since a MOV instruction produces unpredictable results. */
11992 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11993 inst
.instruction
= T_OPCODE_ADD_I3
;
11995 inst
.instruction
= T_OPCODE_CMP_LR
;
11997 inst
.instruction
|= Rn
;
11998 inst
.instruction
|= Rm
<< 3;
12002 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12003 inst
.instruction
= T_OPCODE_MOV_HR
;
12005 inst
.instruction
= T_OPCODE_CMP_HR
;
12011 constraint (Rn
> 7,
12012 _("only lo regs allowed with immediate"));
12013 inst
.instruction
|= Rn
<< 8;
12014 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12025 top
= (inst
.instruction
& 0x00800000) != 0;
12026 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12028 constraint (top
, _(":lower16: not allowed this instruction"));
12029 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12031 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12033 constraint (!top
, _(":upper16: not allowed this instruction"));
12034 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12037 Rd
= inst
.operands
[0].reg
;
12038 reject_bad_reg (Rd
);
12040 inst
.instruction
|= Rd
<< 8;
12041 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12043 imm
= inst
.reloc
.exp
.X_add_number
;
12044 inst
.instruction
|= (imm
& 0xf000) << 4;
12045 inst
.instruction
|= (imm
& 0x0800) << 15;
12046 inst
.instruction
|= (imm
& 0x0700) << 4;
12047 inst
.instruction
|= (imm
& 0x00ff);
12052 do_t_mvn_tst (void)
12056 Rn
= inst
.operands
[0].reg
;
12057 Rm
= inst
.operands
[1].reg
;
12059 if (inst
.instruction
== T_MNEM_cmp
12060 || inst
.instruction
== T_MNEM_cmn
)
12061 constraint (Rn
== REG_PC
, BAD_PC
);
12063 reject_bad_reg (Rn
);
12064 reject_bad_reg (Rm
);
12066 if (unified_syntax
)
12068 int r0off
= (inst
.instruction
== T_MNEM_mvn
12069 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12070 bfd_boolean narrow
;
12072 if (inst
.size_req
== 4
12073 || inst
.instruction
> 0xffff
12074 || inst
.operands
[1].shifted
12075 || Rn
> 7 || Rm
> 7)
12077 else if (inst
.instruction
== T_MNEM_cmn
12078 || inst
.instruction
== T_MNEM_tst
)
12080 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12081 narrow
= !in_it_block ();
12083 narrow
= in_it_block ();
12085 if (!inst
.operands
[1].isreg
)
12087 /* For an immediate, we always generate a 32-bit opcode;
12088 section relaxation will shrink it later if possible. */
12089 if (inst
.instruction
< 0xffff)
12090 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12091 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12092 inst
.instruction
|= Rn
<< r0off
;
12093 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12097 /* See if we can do this with a 16-bit instruction. */
12100 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12101 inst
.instruction
|= Rn
;
12102 inst
.instruction
|= Rm
<< 3;
12106 constraint (inst
.operands
[1].shifted
12107 && inst
.operands
[1].immisreg
,
12108 _("shift must be constant"));
12109 if (inst
.instruction
< 0xffff)
12110 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12111 inst
.instruction
|= Rn
<< r0off
;
12112 encode_thumb32_shifted_operand (1);
12118 constraint (inst
.instruction
> 0xffff
12119 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12120 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12121 _("unshifted register required"));
12122 constraint (Rn
> 7 || Rm
> 7,
12125 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12126 inst
.instruction
|= Rn
;
12127 inst
.instruction
|= Rm
<< 3;
12136 if (do_vfp_nsyn_mrs () == SUCCESS
)
12139 Rd
= inst
.operands
[0].reg
;
12140 reject_bad_reg (Rd
);
12141 inst
.instruction
|= Rd
<< 8;
12143 if (inst
.operands
[1].isreg
)
12145 unsigned br
= inst
.operands
[1].reg
;
12146 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12147 as_bad (_("bad register for mrs"));
12149 inst
.instruction
|= br
& (0xf << 16);
12150 inst
.instruction
|= (br
& 0x300) >> 4;
12151 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12155 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12157 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12159 /* PR gas/12698: The constraint is only applied for m_profile.
12160 If the user has specified -march=all, we want to ignore it as
12161 we are building for any CPU type, including non-m variants. */
12162 bfd_boolean m_profile
=
12163 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12164 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12165 "not support requested special purpose register"));
12168 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12170 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12171 _("'APSR', 'CPSR' or 'SPSR' expected"));
12173 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12174 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12175 inst
.instruction
|= 0xf0000;
12185 if (do_vfp_nsyn_msr () == SUCCESS
)
12188 constraint (!inst
.operands
[1].isreg
,
12189 _("Thumb encoding does not support an immediate here"));
12191 if (inst
.operands
[0].isreg
)
12192 flags
= (int)(inst
.operands
[0].reg
);
12194 flags
= inst
.operands
[0].imm
;
12196 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12198 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12200 /* PR gas/12698: The constraint is only applied for m_profile.
12201 If the user has specified -march=all, we want to ignore it as
12202 we are building for any CPU type, including non-m variants. */
12203 bfd_boolean m_profile
=
12204 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12205 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12206 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12207 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12208 && bits
!= PSR_f
)) && m_profile
,
12209 _("selected processor does not support requested special "
12210 "purpose register"));
12213 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12214 "requested special purpose register"));
12216 Rn
= inst
.operands
[1].reg
;
12217 reject_bad_reg (Rn
);
12219 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12220 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12221 inst
.instruction
|= (flags
& 0x300) >> 4;
12222 inst
.instruction
|= (flags
& 0xff);
12223 inst
.instruction
|= Rn
<< 16;
12229 bfd_boolean narrow
;
12230 unsigned Rd
, Rn
, Rm
;
12232 if (!inst
.operands
[2].present
)
12233 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12235 Rd
= inst
.operands
[0].reg
;
12236 Rn
= inst
.operands
[1].reg
;
12237 Rm
= inst
.operands
[2].reg
;
12239 if (unified_syntax
)
12241 if (inst
.size_req
== 4
12247 else if (inst
.instruction
== T_MNEM_muls
)
12248 narrow
= !in_it_block ();
12250 narrow
= in_it_block ();
12254 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12255 constraint (Rn
> 7 || Rm
> 7,
12262 /* 16-bit MULS/Conditional MUL. */
12263 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12264 inst
.instruction
|= Rd
;
12267 inst
.instruction
|= Rm
<< 3;
12269 inst
.instruction
|= Rn
<< 3;
12271 constraint (1, _("dest must overlap one source register"));
12275 constraint (inst
.instruction
!= T_MNEM_mul
,
12276 _("Thumb-2 MUL must not set flags"));
12278 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12279 inst
.instruction
|= Rd
<< 8;
12280 inst
.instruction
|= Rn
<< 16;
12281 inst
.instruction
|= Rm
<< 0;
12283 reject_bad_reg (Rd
);
12284 reject_bad_reg (Rn
);
12285 reject_bad_reg (Rm
);
12292 unsigned RdLo
, RdHi
, Rn
, Rm
;
12294 RdLo
= inst
.operands
[0].reg
;
12295 RdHi
= inst
.operands
[1].reg
;
12296 Rn
= inst
.operands
[2].reg
;
12297 Rm
= inst
.operands
[3].reg
;
12299 reject_bad_reg (RdLo
);
12300 reject_bad_reg (RdHi
);
12301 reject_bad_reg (Rn
);
12302 reject_bad_reg (Rm
);
12304 inst
.instruction
|= RdLo
<< 12;
12305 inst
.instruction
|= RdHi
<< 8;
12306 inst
.instruction
|= Rn
<< 16;
12307 inst
.instruction
|= Rm
;
12310 as_tsktsk (_("rdhi and rdlo must be different"));
12316 set_it_insn_type (NEUTRAL_IT_INSN
);
12318 if (unified_syntax
)
12320 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12322 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12323 inst
.instruction
|= inst
.operands
[0].imm
;
12327 /* PR9722: Check for Thumb2 availability before
12328 generating a thumb2 nop instruction. */
12329 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12331 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12332 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12335 inst
.instruction
= 0x46c0;
12340 constraint (inst
.operands
[0].present
,
12341 _("Thumb does not support NOP with hints"));
12342 inst
.instruction
= 0x46c0;
12349 if (unified_syntax
)
12351 bfd_boolean narrow
;
12353 if (THUMB_SETS_FLAGS (inst
.instruction
))
12354 narrow
= !in_it_block ();
12356 narrow
= in_it_block ();
12357 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12359 if (inst
.size_req
== 4)
12364 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12365 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12366 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12370 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12371 inst
.instruction
|= inst
.operands
[0].reg
;
12372 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12377 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12379 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12381 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12382 inst
.instruction
|= inst
.operands
[0].reg
;
12383 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12392 Rd
= inst
.operands
[0].reg
;
12393 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12395 reject_bad_reg (Rd
);
12396 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12397 reject_bad_reg (Rn
);
12399 inst
.instruction
|= Rd
<< 8;
12400 inst
.instruction
|= Rn
<< 16;
12402 if (!inst
.operands
[2].isreg
)
12404 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12405 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12411 Rm
= inst
.operands
[2].reg
;
12412 reject_bad_reg (Rm
);
12414 constraint (inst
.operands
[2].shifted
12415 && inst
.operands
[2].immisreg
,
12416 _("shift must be constant"));
12417 encode_thumb32_shifted_operand (2);
12424 unsigned Rd
, Rn
, Rm
;
12426 Rd
= inst
.operands
[0].reg
;
12427 Rn
= inst
.operands
[1].reg
;
12428 Rm
= inst
.operands
[2].reg
;
12430 reject_bad_reg (Rd
);
12431 reject_bad_reg (Rn
);
12432 reject_bad_reg (Rm
);
12434 inst
.instruction
|= Rd
<< 8;
12435 inst
.instruction
|= Rn
<< 16;
12436 inst
.instruction
|= Rm
;
12437 if (inst
.operands
[3].present
)
12439 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12440 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12441 _("expression too complex"));
12442 inst
.instruction
|= (val
& 0x1c) << 10;
12443 inst
.instruction
|= (val
& 0x03) << 6;
12450 if (!inst
.operands
[3].present
)
12454 inst
.instruction
&= ~0x00000020;
12456 /* PR 10168. Swap the Rm and Rn registers. */
12457 Rtmp
= inst
.operands
[1].reg
;
12458 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12459 inst
.operands
[2].reg
= Rtmp
;
12467 if (inst
.operands
[0].immisreg
)
12468 reject_bad_reg (inst
.operands
[0].imm
);
12470 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12474 do_t_push_pop (void)
12478 constraint (inst
.operands
[0].writeback
,
12479 _("push/pop do not support {reglist}^"));
12480 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12481 _("expression too complex"));
12483 mask
= inst
.operands
[0].imm
;
12484 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12485 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12486 else if (inst
.size_req
!= 4
12487 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12488 ? REG_LR
: REG_PC
)))
12490 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12491 inst
.instruction
|= THUMB_PP_PC_LR
;
12492 inst
.instruction
|= mask
& 0xff;
12494 else if (unified_syntax
)
12496 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12497 encode_thumb2_ldmstm (13, mask
, TRUE
);
12501 inst
.error
= _("invalid register list to push/pop instruction");
12511 Rd
= inst
.operands
[0].reg
;
12512 Rm
= inst
.operands
[1].reg
;
12514 reject_bad_reg (Rd
);
12515 reject_bad_reg (Rm
);
12517 inst
.instruction
|= Rd
<< 8;
12518 inst
.instruction
|= Rm
<< 16;
12519 inst
.instruction
|= Rm
;
12527 Rd
= inst
.operands
[0].reg
;
12528 Rm
= inst
.operands
[1].reg
;
12530 reject_bad_reg (Rd
);
12531 reject_bad_reg (Rm
);
12533 if (Rd
<= 7 && Rm
<= 7
12534 && inst
.size_req
!= 4)
12536 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12537 inst
.instruction
|= Rd
;
12538 inst
.instruction
|= Rm
<< 3;
12540 else if (unified_syntax
)
12542 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12543 inst
.instruction
|= Rd
<< 8;
12544 inst
.instruction
|= Rm
<< 16;
12545 inst
.instruction
|= Rm
;
12548 inst
.error
= BAD_HIREG
;
12556 Rd
= inst
.operands
[0].reg
;
12557 Rm
= inst
.operands
[1].reg
;
12559 reject_bad_reg (Rd
);
12560 reject_bad_reg (Rm
);
12562 inst
.instruction
|= Rd
<< 8;
12563 inst
.instruction
|= Rm
;
12571 Rd
= inst
.operands
[0].reg
;
12572 Rs
= (inst
.operands
[1].present
12573 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12574 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12576 reject_bad_reg (Rd
);
12577 reject_bad_reg (Rs
);
12578 if (inst
.operands
[2].isreg
)
12579 reject_bad_reg (inst
.operands
[2].reg
);
12581 inst
.instruction
|= Rd
<< 8;
12582 inst
.instruction
|= Rs
<< 16;
12583 if (!inst
.operands
[2].isreg
)
12585 bfd_boolean narrow
;
12587 if ((inst
.instruction
& 0x00100000) != 0)
12588 narrow
= !in_it_block ();
12590 narrow
= in_it_block ();
12592 if (Rd
> 7 || Rs
> 7)
12595 if (inst
.size_req
== 4 || !unified_syntax
)
12598 if (inst
.reloc
.exp
.X_op
!= O_constant
12599 || inst
.reloc
.exp
.X_add_number
!= 0)
12602 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12603 relaxation, but it doesn't seem worth the hassle. */
12606 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12607 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12608 inst
.instruction
|= Rs
<< 3;
12609 inst
.instruction
|= Rd
;
12613 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12614 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12618 encode_thumb32_shifted_operand (2);
12624 if (warn_on_deprecated
12625 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12626 as_tsktsk (_("setend use is deprecated for ARMv8"));
12628 set_it_insn_type (OUTSIDE_IT_INSN
);
12629 if (inst
.operands
[0].imm
)
12630 inst
.instruction
|= 0x8;
12636 if (!inst
.operands
[1].present
)
12637 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12639 if (unified_syntax
)
12641 bfd_boolean narrow
;
12644 switch (inst
.instruction
)
12647 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12649 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12651 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12653 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12657 if (THUMB_SETS_FLAGS (inst
.instruction
))
12658 narrow
= !in_it_block ();
12660 narrow
= in_it_block ();
12661 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12663 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12665 if (inst
.operands
[2].isreg
12666 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12667 || inst
.operands
[2].reg
> 7))
12669 if (inst
.size_req
== 4)
12672 reject_bad_reg (inst
.operands
[0].reg
);
12673 reject_bad_reg (inst
.operands
[1].reg
);
12677 if (inst
.operands
[2].isreg
)
12679 reject_bad_reg (inst
.operands
[2].reg
);
12680 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12681 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12682 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12683 inst
.instruction
|= inst
.operands
[2].reg
;
12685 /* PR 12854: Error on extraneous shifts. */
12686 constraint (inst
.operands
[2].shifted
,
12687 _("extraneous shift as part of operand to shift insn"));
12691 inst
.operands
[1].shifted
= 1;
12692 inst
.operands
[1].shift_kind
= shift_kind
;
12693 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12694 ? T_MNEM_movs
: T_MNEM_mov
);
12695 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12696 encode_thumb32_shifted_operand (1);
12697 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12698 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12703 if (inst
.operands
[2].isreg
)
12705 switch (shift_kind
)
12707 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12708 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12709 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12710 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12714 inst
.instruction
|= inst
.operands
[0].reg
;
12715 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12717 /* PR 12854: Error on extraneous shifts. */
12718 constraint (inst
.operands
[2].shifted
,
12719 _("extraneous shift as part of operand to shift insn"));
12723 switch (shift_kind
)
12725 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12726 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12727 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12730 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12731 inst
.instruction
|= inst
.operands
[0].reg
;
12732 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12738 constraint (inst
.operands
[0].reg
> 7
12739 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12740 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12742 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12744 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12745 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12746 _("source1 and dest must be same register"));
12748 switch (inst
.instruction
)
12750 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12751 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12752 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12753 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12757 inst
.instruction
|= inst
.operands
[0].reg
;
12758 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12760 /* PR 12854: Error on extraneous shifts. */
12761 constraint (inst
.operands
[2].shifted
,
12762 _("extraneous shift as part of operand to shift insn"));
12766 switch (inst
.instruction
)
12768 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12769 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12770 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12771 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12774 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12775 inst
.instruction
|= inst
.operands
[0].reg
;
12776 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12784 unsigned Rd
, Rn
, Rm
;
12786 Rd
= inst
.operands
[0].reg
;
12787 Rn
= inst
.operands
[1].reg
;
12788 Rm
= inst
.operands
[2].reg
;
12790 reject_bad_reg (Rd
);
12791 reject_bad_reg (Rn
);
12792 reject_bad_reg (Rm
);
12794 inst
.instruction
|= Rd
<< 8;
12795 inst
.instruction
|= Rn
<< 16;
12796 inst
.instruction
|= Rm
;
12802 unsigned Rd
, Rn
, Rm
;
12804 Rd
= inst
.operands
[0].reg
;
12805 Rm
= inst
.operands
[1].reg
;
12806 Rn
= inst
.operands
[2].reg
;
12808 reject_bad_reg (Rd
);
12809 reject_bad_reg (Rn
);
12810 reject_bad_reg (Rm
);
12812 inst
.instruction
|= Rd
<< 8;
12813 inst
.instruction
|= Rn
<< 16;
12814 inst
.instruction
|= Rm
;
12820 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12821 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12822 _("SMC is not permitted on this architecture"));
12823 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12824 _("expression too complex"));
12825 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12826 inst
.instruction
|= (value
& 0xf000) >> 12;
12827 inst
.instruction
|= (value
& 0x0ff0);
12828 inst
.instruction
|= (value
& 0x000f) << 16;
12829 /* PR gas/15623: SMC instructions must be last in an IT block. */
12830 set_it_insn_type_last ();
12836 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12838 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12839 inst
.instruction
|= (value
& 0x0fff);
12840 inst
.instruction
|= (value
& 0xf000) << 4;
12844 do_t_ssat_usat (int bias
)
12848 Rd
= inst
.operands
[0].reg
;
12849 Rn
= inst
.operands
[2].reg
;
12851 reject_bad_reg (Rd
);
12852 reject_bad_reg (Rn
);
12854 inst
.instruction
|= Rd
<< 8;
12855 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12856 inst
.instruction
|= Rn
<< 16;
12858 if (inst
.operands
[3].present
)
12860 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12862 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12864 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12865 _("expression too complex"));
12867 if (shift_amount
!= 0)
12869 constraint (shift_amount
> 31,
12870 _("shift expression is too large"));
12872 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12873 inst
.instruction
|= 0x00200000; /* sh bit. */
12875 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12876 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12884 do_t_ssat_usat (1);
12892 Rd
= inst
.operands
[0].reg
;
12893 Rn
= inst
.operands
[2].reg
;
12895 reject_bad_reg (Rd
);
12896 reject_bad_reg (Rn
);
12898 inst
.instruction
|= Rd
<< 8;
12899 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12900 inst
.instruction
|= Rn
<< 16;
12906 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12907 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12908 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12909 || inst
.operands
[2].negative
,
12912 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12914 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12915 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12916 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12917 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12923 if (!inst
.operands
[2].present
)
12924 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12926 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12927 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12928 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12931 inst
.instruction
|= inst
.operands
[0].reg
;
12932 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12933 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12934 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12940 unsigned Rd
, Rn
, Rm
;
12942 Rd
= inst
.operands
[0].reg
;
12943 Rn
= inst
.operands
[1].reg
;
12944 Rm
= inst
.operands
[2].reg
;
12946 reject_bad_reg (Rd
);
12947 reject_bad_reg (Rn
);
12948 reject_bad_reg (Rm
);
12950 inst
.instruction
|= Rd
<< 8;
12951 inst
.instruction
|= Rn
<< 16;
12952 inst
.instruction
|= Rm
;
12953 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12961 Rd
= inst
.operands
[0].reg
;
12962 Rm
= inst
.operands
[1].reg
;
12964 reject_bad_reg (Rd
);
12965 reject_bad_reg (Rm
);
12967 if (inst
.instruction
<= 0xffff
12968 && inst
.size_req
!= 4
12969 && Rd
<= 7 && Rm
<= 7
12970 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12972 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12973 inst
.instruction
|= Rd
;
12974 inst
.instruction
|= Rm
<< 3;
12976 else if (unified_syntax
)
12978 if (inst
.instruction
<= 0xffff)
12979 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12980 inst
.instruction
|= Rd
<< 8;
12981 inst
.instruction
|= Rm
;
12982 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12986 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12987 _("Thumb encoding does not support rotation"));
12988 constraint (1, BAD_HIREG
);
12995 /* We have to do the following check manually as ARM_EXT_OS only applies
12997 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12999 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
13000 /* This only applies to the v6m howver, not later architectures. */
13001 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
13002 as_bad (_("SVC is not permitted on this architecture"));
13003 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
13006 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13015 half
= (inst
.instruction
& 0x10) != 0;
13016 set_it_insn_type_last ();
13017 constraint (inst
.operands
[0].immisreg
,
13018 _("instruction requires register index"));
13020 Rn
= inst
.operands
[0].reg
;
13021 Rm
= inst
.operands
[0].imm
;
13023 constraint (Rn
== REG_SP
, BAD_SP
);
13024 reject_bad_reg (Rm
);
13026 constraint (!half
&& inst
.operands
[0].shifted
,
13027 _("instruction does not allow shifted index"));
13028 inst
.instruction
|= (Rn
<< 16) | Rm
;
13034 if (!inst
.operands
[0].present
)
13035 inst
.operands
[0].imm
= 0;
13037 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13039 constraint (inst
.size_req
== 2,
13040 _("immediate value out of range"));
13041 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13042 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13043 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13047 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13048 inst
.instruction
|= inst
.operands
[0].imm
;
13051 set_it_insn_type (NEUTRAL_IT_INSN
);
13058 do_t_ssat_usat (0);
13066 Rd
= inst
.operands
[0].reg
;
13067 Rn
= inst
.operands
[2].reg
;
13069 reject_bad_reg (Rd
);
13070 reject_bad_reg (Rn
);
13072 inst
.instruction
|= Rd
<< 8;
13073 inst
.instruction
|= inst
.operands
[1].imm
;
13074 inst
.instruction
|= Rn
<< 16;
13077 /* Neon instruction encoder helpers. */
13079 /* Encodings for the different types for various Neon opcodes. */
13081 /* An "invalid" code for the following tables. */
13084 struct neon_tab_entry
13087 unsigned float_or_poly
;
13088 unsigned scalar_or_imm
;
13091 /* Map overloaded Neon opcodes to their respective encodings. */
13092 #define NEON_ENC_TAB \
13093 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13094 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13095 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13096 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13097 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13098 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13099 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13100 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13101 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13102 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13103 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13104 /* Register variants of the following two instructions are encoded as
13105 vcge / vcgt with the operands reversed. */ \
13106 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13107 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13108 X(vfma, N_INV, 0x0000c10, N_INV), \
13109 X(vfms, N_INV, 0x0200c10, N_INV), \
13110 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13111 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13112 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13113 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13114 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13115 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13116 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13117 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13118 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13119 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13120 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13121 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13122 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13123 X(vshl, 0x0000400, N_INV, 0x0800510), \
13124 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13125 X(vand, 0x0000110, N_INV, 0x0800030), \
13126 X(vbic, 0x0100110, N_INV, 0x0800030), \
13127 X(veor, 0x1000110, N_INV, N_INV), \
13128 X(vorn, 0x0300110, N_INV, 0x0800010), \
13129 X(vorr, 0x0200110, N_INV, 0x0800010), \
13130 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13131 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13132 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13133 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13134 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13135 X(vst1, 0x0000000, 0x0800000, N_INV), \
13136 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13137 X(vst2, 0x0000100, 0x0800100, N_INV), \
13138 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13139 X(vst3, 0x0000200, 0x0800200, N_INV), \
13140 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13141 X(vst4, 0x0000300, 0x0800300, N_INV), \
13142 X(vmovn, 0x1b20200, N_INV, N_INV), \
13143 X(vtrn, 0x1b20080, N_INV, N_INV), \
13144 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13145 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13146 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13147 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13148 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13149 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13150 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13151 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13152 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13153 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13154 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13155 X(vseleq, 0xe000a00, N_INV, N_INV), \
13156 X(vselvs, 0xe100a00, N_INV, N_INV), \
13157 X(vselge, 0xe200a00, N_INV, N_INV), \
13158 X(vselgt, 0xe300a00, N_INV, N_INV), \
13159 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13160 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13161 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13162 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13163 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13164 X(aes, 0x3b00300, N_INV, N_INV), \
13165 X(sha3op, 0x2000c00, N_INV, N_INV), \
13166 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13167 X(sha2op, 0x3ba0380, N_INV, N_INV)
13171 #define X(OPC,I,F,S) N_MNEM_##OPC
13176 static const struct neon_tab_entry neon_enc_tab
[] =
13178 #define X(OPC,I,F,S) { (I), (F), (S) }
13183 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13184 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13185 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13186 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13187 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13188 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13189 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13190 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13191 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13192 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13193 #define NEON_ENC_SINGLE_(X) \
13194 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13195 #define NEON_ENC_DOUBLE_(X) \
13196 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13197 #define NEON_ENC_FPV8_(X) \
13198 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13200 #define NEON_ENCODE(type, inst) \
13203 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13204 inst.is_neon = 1; \
13208 #define check_neon_suffixes \
13211 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13213 as_bad (_("invalid neon suffix for non neon instruction")); \
13219 /* Define shapes for instruction operands. The following mnemonic characters
13220 are used in this table:
13222 F - VFP S<n> register
13223 D - Neon D<n> register
13224 Q - Neon Q<n> register
13228 L - D<n> register list
13230 This table is used to generate various data:
13231 - enumerations of the form NS_DDR to be used as arguments to
13233 - a table classifying shapes into single, double, quad, mixed.
13234 - a table used to drive neon_select_shape. */
13236 #define NEON_SHAPE_DEF \
13237 X(3, (D, D, D), DOUBLE), \
13238 X(3, (Q, Q, Q), QUAD), \
13239 X(3, (D, D, I), DOUBLE), \
13240 X(3, (Q, Q, I), QUAD), \
13241 X(3, (D, D, S), DOUBLE), \
13242 X(3, (Q, Q, S), QUAD), \
13243 X(2, (D, D), DOUBLE), \
13244 X(2, (Q, Q), QUAD), \
13245 X(2, (D, S), DOUBLE), \
13246 X(2, (Q, S), QUAD), \
13247 X(2, (D, R), DOUBLE), \
13248 X(2, (Q, R), QUAD), \
13249 X(2, (D, I), DOUBLE), \
13250 X(2, (Q, I), QUAD), \
13251 X(3, (D, L, D), DOUBLE), \
13252 X(2, (D, Q), MIXED), \
13253 X(2, (Q, D), MIXED), \
13254 X(3, (D, Q, I), MIXED), \
13255 X(3, (Q, D, I), MIXED), \
13256 X(3, (Q, D, D), MIXED), \
13257 X(3, (D, Q, Q), MIXED), \
13258 X(3, (Q, Q, D), MIXED), \
13259 X(3, (Q, D, S), MIXED), \
13260 X(3, (D, Q, S), MIXED), \
13261 X(4, (D, D, D, I), DOUBLE), \
13262 X(4, (Q, Q, Q, I), QUAD), \
13263 X(2, (F, F), SINGLE), \
13264 X(3, (F, F, F), SINGLE), \
13265 X(2, (F, I), SINGLE), \
13266 X(2, (F, D), MIXED), \
13267 X(2, (D, F), MIXED), \
13268 X(3, (F, F, I), MIXED), \
13269 X(4, (R, R, F, F), SINGLE), \
13270 X(4, (F, F, R, R), SINGLE), \
13271 X(3, (D, R, R), DOUBLE), \
13272 X(3, (R, R, D), DOUBLE), \
13273 X(2, (S, R), SINGLE), \
13274 X(2, (R, S), SINGLE), \
13275 X(2, (F, R), SINGLE), \
13276 X(2, (R, F), SINGLE), \
13277 /* Half float shape supported so far. */\
13278 X (2, (H, D), MIXED), \
13279 X (2, (D, H), MIXED), \
13280 X (2, (H, F), MIXED), \
13281 X (2, (F, H), MIXED), \
13282 X (2, (H, H), HALF), \
13283 X (2, (H, R), HALF), \
13284 X (2, (R, H), HALF), \
13285 X (2, (H, I), HALF), \
13286 X (3, (H, H, H), HALF), \
13287 X (3, (H, F, I), MIXED), \
13288 X (3, (F, H, I), MIXED)
13290 #define S2(A,B) NS_##A##B
13291 #define S3(A,B,C) NS_##A##B##C
13292 #define S4(A,B,C,D) NS_##A##B##C##D
13294 #define X(N, L, C) S##N L
13307 enum neon_shape_class
13316 #define X(N, L, C) SC_##C
13318 static enum neon_shape_class neon_shape_class
[] =
13337 /* Register widths of above. */
13338 static unsigned neon_shape_el_size
[] =
13350 struct neon_shape_info
13353 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13356 #define S2(A,B) { SE_##A, SE_##B }
13357 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13358 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13360 #define X(N, L, C) { N, S##N L }
13362 static struct neon_shape_info neon_shape_tab
[] =
13372 /* Bit masks used in type checking given instructions.
13373 'N_EQK' means the type must be the same as (or based on in some way) the key
13374 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13375 set, various other bits can be set as well in order to modify the meaning of
13376 the type constraint. */
13378 enum neon_type_mask
13402 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13403 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13404 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13405 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13406 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13407 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13408 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13409 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13410 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13411 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13412 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13414 N_MAX_NONSPECIAL
= N_P64
13417 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13419 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13420 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13421 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13422 #define N_SUF_32 (N_SU_32 | N_F32)
13423 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13424 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13425 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13427 /* Pass this as the first type argument to neon_check_type to ignore types
13429 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13431 /* Select a "shape" for the current instruction (describing register types or
13432 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13433 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13434 function of operand parsing, so this function doesn't need to be called.
13435 Shapes should be listed in order of decreasing length. */
13437 static enum neon_shape
13438 neon_select_shape (enum neon_shape shape
, ...)
13441 enum neon_shape first_shape
= shape
;
13443 /* Fix missing optional operands. FIXME: we don't know at this point how
13444 many arguments we should have, so this makes the assumption that we have
13445 > 1. This is true of all current Neon opcodes, I think, but may not be
13446 true in the future. */
13447 if (!inst
.operands
[1].present
)
13448 inst
.operands
[1] = inst
.operands
[0];
13450 va_start (ap
, shape
);
13452 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13457 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13459 if (!inst
.operands
[j
].present
)
13465 switch (neon_shape_tab
[shape
].el
[j
])
13467 /* If a .f16, .16, .u16, .s16 type specifier is given over
13468 a VFP single precision register operand, it's essentially
13469 means only half of the register is used.
13471 If the type specifier is given after the mnemonics, the
13472 information is stored in inst.vectype. If the type specifier
13473 is given after register operand, the information is stored
13474 in inst.operands[].vectype.
13476 When there is only one type specifier, and all the register
13477 operands are the same type of hardware register, the type
13478 specifier applies to all register operands.
13480 If no type specifier is given, the shape is inferred from
13481 operand information.
13484 vadd.f16 s0, s1, s2: NS_HHH
13485 vabs.f16 s0, s1: NS_HH
13486 vmov.f16 s0, r1: NS_HR
13487 vmov.f16 r0, s1: NS_RH
13488 vcvt.f16 r0, s1: NS_RH
13489 vcvt.f16.s32 s2, s2, #29: NS_HFI
13490 vcvt.f16.s32 s2, s2: NS_HF
13493 if (!(inst
.operands
[j
].isreg
13494 && inst
.operands
[j
].isvec
13495 && inst
.operands
[j
].issingle
13496 && !inst
.operands
[j
].isquad
13497 && ((inst
.vectype
.elems
== 1
13498 && inst
.vectype
.el
[0].size
== 16)
13499 || (inst
.vectype
.elems
> 1
13500 && inst
.vectype
.el
[j
].size
== 16)
13501 || (inst
.vectype
.elems
== 0
13502 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13503 && inst
.operands
[j
].vectype
.size
== 16))))
13508 if (!(inst
.operands
[j
].isreg
13509 && inst
.operands
[j
].isvec
13510 && inst
.operands
[j
].issingle
13511 && !inst
.operands
[j
].isquad
13512 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13513 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13514 || (inst
.vectype
.elems
== 0
13515 && (inst
.operands
[j
].vectype
.size
== 32
13516 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13521 if (!(inst
.operands
[j
].isreg
13522 && inst
.operands
[j
].isvec
13523 && !inst
.operands
[j
].isquad
13524 && !inst
.operands
[j
].issingle
))
13529 if (!(inst
.operands
[j
].isreg
13530 && !inst
.operands
[j
].isvec
))
13535 if (!(inst
.operands
[j
].isreg
13536 && inst
.operands
[j
].isvec
13537 && inst
.operands
[j
].isquad
13538 && !inst
.operands
[j
].issingle
))
13543 if (!(!inst
.operands
[j
].isreg
13544 && !inst
.operands
[j
].isscalar
))
13549 if (!(!inst
.operands
[j
].isreg
13550 && inst
.operands
[j
].isscalar
))
13560 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13561 /* We've matched all the entries in the shape table, and we don't
13562 have any left over operands which have not been matched. */
13568 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13569 first_error (_("invalid instruction shape"));
13574 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13575 means the Q bit should be set). */
13578 neon_quad (enum neon_shape shape
)
13580 return neon_shape_class
[shape
] == SC_QUAD
;
13584 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13587 /* Allow modification to be made to types which are constrained to be
13588 based on the key element, based on bits set alongside N_EQK. */
13589 if ((typebits
& N_EQK
) != 0)
13591 if ((typebits
& N_HLF
) != 0)
13593 else if ((typebits
& N_DBL
) != 0)
13595 if ((typebits
& N_SGN
) != 0)
13596 *g_type
= NT_signed
;
13597 else if ((typebits
& N_UNS
) != 0)
13598 *g_type
= NT_unsigned
;
13599 else if ((typebits
& N_INT
) != 0)
13600 *g_type
= NT_integer
;
13601 else if ((typebits
& N_FLT
) != 0)
13602 *g_type
= NT_float
;
13603 else if ((typebits
& N_SIZ
) != 0)
13604 *g_type
= NT_untyped
;
13608 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13609 operand type, i.e. the single type specified in a Neon instruction when it
13610 is the only one given. */
13612 static struct neon_type_el
13613 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13615 struct neon_type_el dest
= *key
;
13617 gas_assert ((thisarg
& N_EQK
) != 0);
13619 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13624 /* Convert Neon type and size into compact bitmask representation. */
13626 static enum neon_type_mask
13627 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13634 case 8: return N_8
;
13635 case 16: return N_16
;
13636 case 32: return N_32
;
13637 case 64: return N_64
;
13645 case 8: return N_I8
;
13646 case 16: return N_I16
;
13647 case 32: return N_I32
;
13648 case 64: return N_I64
;
13656 case 16: return N_F16
;
13657 case 32: return N_F32
;
13658 case 64: return N_F64
;
13666 case 8: return N_P8
;
13667 case 16: return N_P16
;
13668 case 64: return N_P64
;
13676 case 8: return N_S8
;
13677 case 16: return N_S16
;
13678 case 32: return N_S32
;
13679 case 64: return N_S64
;
13687 case 8: return N_U8
;
13688 case 16: return N_U16
;
13689 case 32: return N_U32
;
13690 case 64: return N_U64
;
13701 /* Convert compact Neon bitmask type representation to a type and size. Only
13702 handles the case where a single bit is set in the mask. */
13705 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13706 enum neon_type_mask mask
)
13708 if ((mask
& N_EQK
) != 0)
13711 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13713 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13715 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13717 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13722 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13724 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13725 *type
= NT_unsigned
;
13726 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13727 *type
= NT_integer
;
13728 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13729 *type
= NT_untyped
;
13730 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13732 else if ((mask
& (N_F_ALL
)) != 0)
13740 /* Modify a bitmask of allowed types. This is only needed for type
13744 modify_types_allowed (unsigned allowed
, unsigned mods
)
13747 enum neon_el_type type
;
13753 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13755 if (el_type_of_type_chk (&type
, &size
,
13756 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13758 neon_modify_type_size (mods
, &type
, &size
);
13759 destmask
|= type_chk_of_el_type (type
, size
);
13766 /* Check type and return type classification.
13767 The manual states (paraphrase): If one datatype is given, it indicates the
13769 - the second operand, if there is one
13770 - the operand, if there is no second operand
13771 - the result, if there are no operands.
13772 This isn't quite good enough though, so we use a concept of a "key" datatype
13773 which is set on a per-instruction basis, which is the one which matters when
13774 only one data type is written.
13775 Note: this function has side-effects (e.g. filling in missing operands). All
13776 Neon instructions should call it before performing bit encoding. */
13778 static struct neon_type_el
13779 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13782 unsigned i
, pass
, key_el
= 0;
13783 unsigned types
[NEON_MAX_TYPE_ELS
];
13784 enum neon_el_type k_type
= NT_invtype
;
13785 unsigned k_size
= -1u;
13786 struct neon_type_el badtype
= {NT_invtype
, -1};
13787 unsigned key_allowed
= 0;
13789 /* Optional registers in Neon instructions are always (not) in operand 1.
13790 Fill in the missing operand here, if it was omitted. */
13791 if (els
> 1 && !inst
.operands
[1].present
)
13792 inst
.operands
[1] = inst
.operands
[0];
13794 /* Suck up all the varargs. */
13796 for (i
= 0; i
< els
; i
++)
13798 unsigned thisarg
= va_arg (ap
, unsigned);
13799 if (thisarg
== N_IGNORE_TYPE
)
13804 types
[i
] = thisarg
;
13805 if ((thisarg
& N_KEY
) != 0)
13810 if (inst
.vectype
.elems
> 0)
13811 for (i
= 0; i
< els
; i
++)
13812 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13814 first_error (_("types specified in both the mnemonic and operands"));
13818 /* Duplicate inst.vectype elements here as necessary.
13819 FIXME: No idea if this is exactly the same as the ARM assembler,
13820 particularly when an insn takes one register and one non-register
13822 if (inst
.vectype
.elems
== 1 && els
> 1)
13825 inst
.vectype
.elems
= els
;
13826 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13827 for (j
= 0; j
< els
; j
++)
13829 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13832 else if (inst
.vectype
.elems
== 0 && els
> 0)
13835 /* No types were given after the mnemonic, so look for types specified
13836 after each operand. We allow some flexibility here; as long as the
13837 "key" operand has a type, we can infer the others. */
13838 for (j
= 0; j
< els
; j
++)
13839 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13840 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13842 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13844 for (j
= 0; j
< els
; j
++)
13845 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13846 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13851 first_error (_("operand types can't be inferred"));
13855 else if (inst
.vectype
.elems
!= els
)
13857 first_error (_("type specifier has the wrong number of parts"));
13861 for (pass
= 0; pass
< 2; pass
++)
13863 for (i
= 0; i
< els
; i
++)
13865 unsigned thisarg
= types
[i
];
13866 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13867 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13868 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13869 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13871 /* Decay more-specific signed & unsigned types to sign-insensitive
13872 integer types if sign-specific variants are unavailable. */
13873 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13874 && (types_allowed
& N_SU_ALL
) == 0)
13875 g_type
= NT_integer
;
13877 /* If only untyped args are allowed, decay any more specific types to
13878 them. Some instructions only care about signs for some element
13879 sizes, so handle that properly. */
13880 if (((types_allowed
& N_UNT
) == 0)
13881 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13882 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13883 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13884 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13885 g_type
= NT_untyped
;
13889 if ((thisarg
& N_KEY
) != 0)
13893 key_allowed
= thisarg
& ~N_KEY
;
13898 if ((thisarg
& N_VFP
) != 0)
13900 enum neon_shape_el regshape
;
13901 unsigned regwidth
, match
;
13903 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13906 first_error (_("invalid instruction shape"));
13909 regshape
= neon_shape_tab
[ns
].el
[i
];
13910 regwidth
= neon_shape_el_size
[regshape
];
13912 /* In VFP mode, operands must match register widths. If we
13913 have a key operand, use its width, else use the width of
13914 the current operand. */
13920 /* FP16 will use a single precision register. */
13921 if (regwidth
== 32 && match
== 16)
13923 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
13927 inst
.error
= _(BAD_FP16
);
13932 if (regwidth
!= match
)
13934 first_error (_("operand size must match register width"));
13939 if ((thisarg
& N_EQK
) == 0)
13941 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13943 if ((given_type
& types_allowed
) == 0)
13945 first_error (_("bad type in Neon instruction"));
13951 enum neon_el_type mod_k_type
= k_type
;
13952 unsigned mod_k_size
= k_size
;
13953 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13954 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13956 first_error (_("inconsistent types in Neon instruction"));
13964 return inst
.vectype
.el
[key_el
];
13967 /* Neon-style VFP instruction forwarding. */
13969 /* Thumb VFP instructions have 0xE in the condition field. */
13972 do_vfp_cond_or_thumb (void)
13977 inst
.instruction
|= 0xe0000000;
13979 inst
.instruction
|= inst
.cond
<< 28;
13982 /* Look up and encode a simple mnemonic, for use as a helper function for the
13983 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13984 etc. It is assumed that operand parsing has already been done, and that the
13985 operands are in the form expected by the given opcode (this isn't necessarily
13986 the same as the form in which they were parsed, hence some massaging must
13987 take place before this function is called).
13988 Checks current arch version against that in the looked-up opcode. */
13991 do_vfp_nsyn_opcode (const char *opname
)
13993 const struct asm_opcode
*opcode
;
13995 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14000 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14001 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14008 inst
.instruction
= opcode
->tvalue
;
14009 opcode
->tencode ();
14013 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14014 opcode
->aencode ();
14019 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14021 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14023 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14026 do_vfp_nsyn_opcode ("fadds");
14028 do_vfp_nsyn_opcode ("fsubs");
14030 /* ARMv8.2 fp16 instruction. */
14032 do_scalar_fp16_v82_encode ();
14037 do_vfp_nsyn_opcode ("faddd");
14039 do_vfp_nsyn_opcode ("fsubd");
14043 /* Check operand types to see if this is a VFP instruction, and if so call
14047 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14049 enum neon_shape rs
;
14050 struct neon_type_el et
;
14055 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14056 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14060 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14061 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14062 N_F_ALL
| N_KEY
| N_VFP
);
14069 if (et
.type
!= NT_invtype
)
14080 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14082 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14084 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14087 do_vfp_nsyn_opcode ("fmacs");
14089 do_vfp_nsyn_opcode ("fnmacs");
14091 /* ARMv8.2 fp16 instruction. */
14093 do_scalar_fp16_v82_encode ();
14098 do_vfp_nsyn_opcode ("fmacd");
14100 do_vfp_nsyn_opcode ("fnmacd");
14105 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14107 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14109 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14112 do_vfp_nsyn_opcode ("ffmas");
14114 do_vfp_nsyn_opcode ("ffnmas");
14116 /* ARMv8.2 fp16 instruction. */
14118 do_scalar_fp16_v82_encode ();
14123 do_vfp_nsyn_opcode ("ffmad");
14125 do_vfp_nsyn_opcode ("ffnmad");
14130 do_vfp_nsyn_mul (enum neon_shape rs
)
14132 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14134 do_vfp_nsyn_opcode ("fmuls");
14136 /* ARMv8.2 fp16 instruction. */
14138 do_scalar_fp16_v82_encode ();
14141 do_vfp_nsyn_opcode ("fmuld");
14145 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14147 int is_neg
= (inst
.instruction
& 0x80) != 0;
14148 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14150 if (rs
== NS_FF
|| rs
== NS_HH
)
14153 do_vfp_nsyn_opcode ("fnegs");
14155 do_vfp_nsyn_opcode ("fabss");
14157 /* ARMv8.2 fp16 instruction. */
14159 do_scalar_fp16_v82_encode ();
14164 do_vfp_nsyn_opcode ("fnegd");
14166 do_vfp_nsyn_opcode ("fabsd");
14170 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14171 insns belong to Neon, and are handled elsewhere. */
14174 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14176 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14180 do_vfp_nsyn_opcode ("fldmdbs");
14182 do_vfp_nsyn_opcode ("fldmias");
14187 do_vfp_nsyn_opcode ("fstmdbs");
14189 do_vfp_nsyn_opcode ("fstmias");
14194 do_vfp_nsyn_sqrt (void)
14196 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14197 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14199 if (rs
== NS_FF
|| rs
== NS_HH
)
14201 do_vfp_nsyn_opcode ("fsqrts");
14203 /* ARMv8.2 fp16 instruction. */
14205 do_scalar_fp16_v82_encode ();
14208 do_vfp_nsyn_opcode ("fsqrtd");
14212 do_vfp_nsyn_div (void)
14214 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14215 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14216 N_F_ALL
| N_KEY
| N_VFP
);
14218 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14220 do_vfp_nsyn_opcode ("fdivs");
14222 /* ARMv8.2 fp16 instruction. */
14224 do_scalar_fp16_v82_encode ();
14227 do_vfp_nsyn_opcode ("fdivd");
14231 do_vfp_nsyn_nmul (void)
14233 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14234 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14235 N_F_ALL
| N_KEY
| N_VFP
);
14237 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14239 NEON_ENCODE (SINGLE
, inst
);
14240 do_vfp_sp_dyadic ();
14242 /* ARMv8.2 fp16 instruction. */
14244 do_scalar_fp16_v82_encode ();
14248 NEON_ENCODE (DOUBLE
, inst
);
14249 do_vfp_dp_rd_rn_rm ();
14251 do_vfp_cond_or_thumb ();
14256 do_vfp_nsyn_cmp (void)
14258 enum neon_shape rs
;
14259 if (inst
.operands
[1].isreg
)
14261 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14262 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14264 if (rs
== NS_FF
|| rs
== NS_HH
)
14266 NEON_ENCODE (SINGLE
, inst
);
14267 do_vfp_sp_monadic ();
14271 NEON_ENCODE (DOUBLE
, inst
);
14272 do_vfp_dp_rd_rm ();
14277 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14278 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14280 switch (inst
.instruction
& 0x0fffffff)
14283 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14286 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14292 if (rs
== NS_FI
|| rs
== NS_HI
)
14294 NEON_ENCODE (SINGLE
, inst
);
14295 do_vfp_sp_compare_z ();
14299 NEON_ENCODE (DOUBLE
, inst
);
14303 do_vfp_cond_or_thumb ();
14305 /* ARMv8.2 fp16 instruction. */
14306 if (rs
== NS_HI
|| rs
== NS_HH
)
14307 do_scalar_fp16_v82_encode ();
14311 nsyn_insert_sp (void)
14313 inst
.operands
[1] = inst
.operands
[0];
14314 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14315 inst
.operands
[0].reg
= REG_SP
;
14316 inst
.operands
[0].isreg
= 1;
14317 inst
.operands
[0].writeback
= 1;
14318 inst
.operands
[0].present
= 1;
14322 do_vfp_nsyn_push (void)
14325 if (inst
.operands
[1].issingle
)
14326 do_vfp_nsyn_opcode ("fstmdbs");
14328 do_vfp_nsyn_opcode ("fstmdbd");
14332 do_vfp_nsyn_pop (void)
14335 if (inst
.operands
[1].issingle
)
14336 do_vfp_nsyn_opcode ("fldmias");
14338 do_vfp_nsyn_opcode ("fldmiad");
14341 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14342 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14345 neon_dp_fixup (struct arm_it
* insn
)
14347 unsigned int i
= insn
->instruction
;
14352 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14363 insn
->instruction
= i
;
14366 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14370 neon_logbits (unsigned x
)
14372 return ffs (x
) - 4;
14375 #define LOW4(R) ((R) & 0xf)
14376 #define HI1(R) (((R) >> 4) & 1)
14378 /* Encode insns with bit pattern:
14380 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14381 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14383 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14384 different meaning for some instruction. */
14387 neon_three_same (int isquad
, int ubit
, int size
)
14389 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14390 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14391 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14392 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14393 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14394 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14395 inst
.instruction
|= (isquad
!= 0) << 6;
14396 inst
.instruction
|= (ubit
!= 0) << 24;
14398 inst
.instruction
|= neon_logbits (size
) << 20;
14400 neon_dp_fixup (&inst
);
14403 /* Encode instructions of the form:
14405 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14406 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14408 Don't write size if SIZE == -1. */
14411 neon_two_same (int qbit
, int ubit
, int size
)
14413 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14414 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14415 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14416 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14417 inst
.instruction
|= (qbit
!= 0) << 6;
14418 inst
.instruction
|= (ubit
!= 0) << 24;
14421 inst
.instruction
|= neon_logbits (size
) << 18;
14423 neon_dp_fixup (&inst
);
14426 /* Neon instruction encoders, in approximate order of appearance. */
14429 do_neon_dyadic_i_su (void)
14431 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14432 struct neon_type_el et
= neon_check_type (3, rs
,
14433 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14434 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14438 do_neon_dyadic_i64_su (void)
14440 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14441 struct neon_type_el et
= neon_check_type (3, rs
,
14442 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14443 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14447 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14450 unsigned size
= et
.size
>> 3;
14451 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14452 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14453 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14454 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14455 inst
.instruction
|= (isquad
!= 0) << 6;
14456 inst
.instruction
|= immbits
<< 16;
14457 inst
.instruction
|= (size
>> 3) << 7;
14458 inst
.instruction
|= (size
& 0x7) << 19;
14460 inst
.instruction
|= (uval
!= 0) << 24;
14462 neon_dp_fixup (&inst
);
14466 do_neon_shl_imm (void)
14468 if (!inst
.operands
[2].isreg
)
14470 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14471 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14472 int imm
= inst
.operands
[2].imm
;
14474 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14475 _("immediate out of range for shift"));
14476 NEON_ENCODE (IMMED
, inst
);
14477 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14481 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14482 struct neon_type_el et
= neon_check_type (3, rs
,
14483 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14486 /* VSHL/VQSHL 3-register variants have syntax such as:
14488 whereas other 3-register operations encoded by neon_three_same have
14491 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14493 tmp
= inst
.operands
[2].reg
;
14494 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14495 inst
.operands
[1].reg
= tmp
;
14496 NEON_ENCODE (INTEGER
, inst
);
14497 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14502 do_neon_qshl_imm (void)
14504 if (!inst
.operands
[2].isreg
)
14506 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14507 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14508 int imm
= inst
.operands
[2].imm
;
14510 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14511 _("immediate out of range for shift"));
14512 NEON_ENCODE (IMMED
, inst
);
14513 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14517 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14518 struct neon_type_el et
= neon_check_type (3, rs
,
14519 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14522 /* See note in do_neon_shl_imm. */
14523 tmp
= inst
.operands
[2].reg
;
14524 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14525 inst
.operands
[1].reg
= tmp
;
14526 NEON_ENCODE (INTEGER
, inst
);
14527 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14532 do_neon_rshl (void)
14534 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14535 struct neon_type_el et
= neon_check_type (3, rs
,
14536 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14539 tmp
= inst
.operands
[2].reg
;
14540 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14541 inst
.operands
[1].reg
= tmp
;
14542 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14546 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14548 /* Handle .I8 pseudo-instructions. */
14551 /* Unfortunately, this will make everything apart from zero out-of-range.
14552 FIXME is this the intended semantics? There doesn't seem much point in
14553 accepting .I8 if so. */
14554 immediate
|= immediate
<< 8;
14560 if (immediate
== (immediate
& 0x000000ff))
14562 *immbits
= immediate
;
14565 else if (immediate
== (immediate
& 0x0000ff00))
14567 *immbits
= immediate
>> 8;
14570 else if (immediate
== (immediate
& 0x00ff0000))
14572 *immbits
= immediate
>> 16;
14575 else if (immediate
== (immediate
& 0xff000000))
14577 *immbits
= immediate
>> 24;
14580 if ((immediate
& 0xffff) != (immediate
>> 16))
14581 goto bad_immediate
;
14582 immediate
&= 0xffff;
14585 if (immediate
== (immediate
& 0x000000ff))
14587 *immbits
= immediate
;
14590 else if (immediate
== (immediate
& 0x0000ff00))
14592 *immbits
= immediate
>> 8;
14597 first_error (_("immediate value out of range"));
14602 do_neon_logic (void)
14604 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14606 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14607 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14608 /* U bit and size field were set as part of the bitmask. */
14609 NEON_ENCODE (INTEGER
, inst
);
14610 neon_three_same (neon_quad (rs
), 0, -1);
14614 const int three_ops_form
= (inst
.operands
[2].present
14615 && !inst
.operands
[2].isreg
);
14616 const int immoperand
= (three_ops_form
? 2 : 1);
14617 enum neon_shape rs
= (three_ops_form
14618 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14619 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14620 struct neon_type_el et
= neon_check_type (2, rs
,
14621 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14622 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14626 if (et
.type
== NT_invtype
)
14629 if (three_ops_form
)
14630 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14631 _("first and second operands shall be the same register"));
14633 NEON_ENCODE (IMMED
, inst
);
14635 immbits
= inst
.operands
[immoperand
].imm
;
14638 /* .i64 is a pseudo-op, so the immediate must be a repeating
14640 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14641 inst
.operands
[immoperand
].reg
: 0))
14643 /* Set immbits to an invalid constant. */
14644 immbits
= 0xdeadbeef;
14651 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14655 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14659 /* Pseudo-instruction for VBIC. */
14660 neon_invert_size (&immbits
, 0, et
.size
);
14661 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14665 /* Pseudo-instruction for VORR. */
14666 neon_invert_size (&immbits
, 0, et
.size
);
14667 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14677 inst
.instruction
|= neon_quad (rs
) << 6;
14678 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14679 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14680 inst
.instruction
|= cmode
<< 8;
14681 neon_write_immbits (immbits
);
14683 neon_dp_fixup (&inst
);
14688 do_neon_bitfield (void)
14690 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14691 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14692 neon_three_same (neon_quad (rs
), 0, -1);
14696 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14699 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14700 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14702 if (et
.type
== NT_float
)
14704 NEON_ENCODE (FLOAT
, inst
);
14705 neon_three_same (neon_quad (rs
), 0, -1);
14709 NEON_ENCODE (INTEGER
, inst
);
14710 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14715 do_neon_dyadic_if_su (void)
14717 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14721 do_neon_dyadic_if_su_d (void)
14723 /* This version only allow D registers, but that constraint is enforced during
14724 operand parsing so we don't need to do anything extra here. */
14725 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14729 do_neon_dyadic_if_i_d (void)
14731 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14732 affected if we specify unsigned args. */
14733 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14736 enum vfp_or_neon_is_neon_bits
14739 NEON_CHECK_ARCH
= 2,
14740 NEON_CHECK_ARCH8
= 4
14743 /* Call this function if an instruction which may have belonged to the VFP or
14744 Neon instruction sets, but turned out to be a Neon instruction (due to the
14745 operand types involved, etc.). We have to check and/or fix-up a couple of
14748 - Make sure the user hasn't attempted to make a Neon instruction
14750 - Alter the value in the condition code field if necessary.
14751 - Make sure that the arch supports Neon instructions.
14753 Which of these operations take place depends on bits from enum
14754 vfp_or_neon_is_neon_bits.
14756 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14757 current instruction's condition is COND_ALWAYS, the condition field is
14758 changed to inst.uncond_value. This is necessary because instructions shared
14759 between VFP and Neon may be conditional for the VFP variants only, and the
14760 unconditional Neon version must have, e.g., 0xF in the condition field. */
14763 vfp_or_neon_is_neon (unsigned check
)
14765 /* Conditions are always legal in Thumb mode (IT blocks). */
14766 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14768 if (inst
.cond
!= COND_ALWAYS
)
14770 first_error (_(BAD_COND
));
14773 if (inst
.uncond_value
!= -1)
14774 inst
.instruction
|= inst
.uncond_value
<< 28;
14777 if ((check
& NEON_CHECK_ARCH
)
14778 && !mark_feature_used (&fpu_neon_ext_v1
))
14780 first_error (_(BAD_FPU
));
14784 if ((check
& NEON_CHECK_ARCH8
)
14785 && !mark_feature_used (&fpu_neon_ext_armv8
))
14787 first_error (_(BAD_FPU
));
14795 do_neon_addsub_if_i (void)
14797 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14800 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14803 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14804 affected if we specify unsigned args. */
14805 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14808 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14810 V<op> A,B (A is operand 0, B is operand 2)
14815 so handle that case specially. */
14818 neon_exchange_operands (void)
14820 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14821 if (inst
.operands
[1].present
)
14823 /* Swap operands[1] and operands[2]. */
14824 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14825 inst
.operands
[1] = inst
.operands
[2];
14826 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14830 inst
.operands
[1] = inst
.operands
[2];
14831 inst
.operands
[2] = inst
.operands
[0];
14836 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14838 if (inst
.operands
[2].isreg
)
14841 neon_exchange_operands ();
14842 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14846 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14847 struct neon_type_el et
= neon_check_type (2, rs
,
14848 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14850 NEON_ENCODE (IMMED
, inst
);
14851 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14852 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14853 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14854 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14855 inst
.instruction
|= neon_quad (rs
) << 6;
14856 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14857 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14859 neon_dp_fixup (&inst
);
14866 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
14870 do_neon_cmp_inv (void)
14872 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14878 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
14881 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14882 scalars, which are encoded in 5 bits, M : Rm.
14883 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14884 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14888 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
14890 unsigned regno
= NEON_SCALAR_REG (scalar
);
14891 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
14896 if (regno
> 7 || elno
> 3)
14898 return regno
| (elno
<< 3);
14901 if (regno
> 15 || elno
> 1)
14903 return regno
| (elno
<< 4);
14907 first_error (_("scalar out of range for multiply instruction"));
14913 /* Encode multiply / multiply-accumulate scalar instructions. */
14916 neon_mul_mac (struct neon_type_el et
, int ubit
)
14920 /* Give a more helpful error message if we have an invalid type. */
14921 if (et
.type
== NT_invtype
)
14924 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14925 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14926 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14927 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14928 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14929 inst
.instruction
|= LOW4 (scalar
);
14930 inst
.instruction
|= HI1 (scalar
) << 5;
14931 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14932 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14933 inst
.instruction
|= (ubit
!= 0) << 24;
14935 neon_dp_fixup (&inst
);
14939 do_neon_mac_maybe_scalar (void)
14941 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14944 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14947 if (inst
.operands
[2].isscalar
)
14949 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14950 struct neon_type_el et
= neon_check_type (3, rs
,
14951 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14952 NEON_ENCODE (SCALAR
, inst
);
14953 neon_mul_mac (et
, neon_quad (rs
));
14957 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14958 affected if we specify unsigned args. */
14959 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14964 do_neon_fmac (void)
14966 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14969 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14972 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14978 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14979 struct neon_type_el et
= neon_check_type (3, rs
,
14980 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14981 neon_three_same (neon_quad (rs
), 0, et
.size
);
14984 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14985 same types as the MAC equivalents. The polynomial type for this instruction
14986 is encoded the same as the integer type. */
14991 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14994 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14997 if (inst
.operands
[2].isscalar
)
14998 do_neon_mac_maybe_scalar ();
15000 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
15004 do_neon_qdmulh (void)
15006 if (inst
.operands
[2].isscalar
)
15008 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15009 struct neon_type_el et
= neon_check_type (3, rs
,
15010 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15011 NEON_ENCODE (SCALAR
, inst
);
15012 neon_mul_mac (et
, neon_quad (rs
));
15016 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15017 struct neon_type_el et
= neon_check_type (3, rs
,
15018 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15019 NEON_ENCODE (INTEGER
, inst
);
15020 /* The U bit (rounding) comes from bit mask. */
15021 neon_three_same (neon_quad (rs
), 0, et
.size
);
15026 do_neon_fcmp_absolute (void)
15028 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15029 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
15030 /* Size field comes from bit mask. */
15031 neon_three_same (neon_quad (rs
), 1, -1);
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Swap the operands, then encode as the plain absolute-compare form.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15042 do_neon_step (void)
15044 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15045 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
15046 neon_three_same (neon_quad (rs
), 0, -1);
15050 do_neon_abs_neg (void)
15052 enum neon_shape rs
;
15053 struct neon_type_el et
;
15055 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15058 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15061 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15062 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
15064 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15065 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15066 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15067 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15068 inst
.instruction
|= neon_quad (rs
) << 6;
15069 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15070 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15072 neon_dp_fixup (&inst
);
15078 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15079 struct neon_type_el et
= neon_check_type (2, rs
,
15080 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15081 int imm
= inst
.operands
[2].imm
;
15082 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15083 _("immediate out of range for insert"));
15084 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15090 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15091 struct neon_type_el et
= neon_check_type (2, rs
,
15092 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15093 int imm
= inst
.operands
[2].imm
;
15094 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15095 _("immediate out of range for insert"));
15096 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15100 do_neon_qshlu_imm (void)
15102 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15103 struct neon_type_el et
= neon_check_type (2, rs
,
15104 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15105 int imm
= inst
.operands
[2].imm
;
15106 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15107 _("immediate out of range for shift"));
15108 /* Only encodes the 'U present' variant of the instruction.
15109 In this case, signed types have OP (bit 8) set to 0.
15110 Unsigned types have OP set to 1. */
15111 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15112 /* The rest of the bits are the same as other immediate shifts. */
15113 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15117 do_neon_qmovn (void)
15119 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15120 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15121 /* Saturating move where operands can be signed or unsigned, and the
15122 destination has the same signedness. */
15123 NEON_ENCODE (INTEGER
, inst
);
15124 if (et
.type
== NT_unsigned
)
15125 inst
.instruction
|= 0xc0;
15127 inst
.instruction
|= 0x80;
15128 neon_two_same (0, 1, et
.size
/ 2);
15132 do_neon_qmovun (void)
15134 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15135 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15136 /* Saturating move with unsigned results. Operands must be signed. */
15137 NEON_ENCODE (INTEGER
, inst
);
15138 neon_two_same (0, 1, et
.size
/ 2);
15142 do_neon_rshift_sat_narrow (void)
15144 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15145 or unsigned. If operands are unsigned, results must also be unsigned. */
15146 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15147 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15148 int imm
= inst
.operands
[2].imm
;
15149 /* This gets the bounds check, size encoding and immediate bits calculation
15153 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15154 VQMOVN.I<size> <Dd>, <Qm>. */
15157 inst
.operands
[2].present
= 0;
15158 inst
.instruction
= N_MNEM_vqmovn
;
15163 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15164 _("immediate out of range"));
15165 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15169 do_neon_rshift_sat_narrow_u (void)
15171 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15172 or unsigned. If operands are unsigned, results must also be unsigned. */
15173 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15174 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15175 int imm
= inst
.operands
[2].imm
;
15176 /* This gets the bounds check, size encoding and immediate bits calculation
15180 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15181 VQMOVUN.I<size> <Dd>, <Qm>. */
15184 inst
.operands
[2].present
= 0;
15185 inst
.instruction
= N_MNEM_vqmovun
;
15190 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15191 _("immediate out of range"));
15192 /* FIXME: The manual is kind of unclear about what value U should have in
15193 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15195 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15199 do_neon_movn (void)
15201 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15202 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15203 NEON_ENCODE (INTEGER
, inst
);
15204 neon_two_same (0, 1, et
.size
/ 2);
15208 do_neon_rshift_narrow (void)
15210 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15211 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15212 int imm
= inst
.operands
[2].imm
;
15213 /* This gets the bounds check, size encoding and immediate bits calculation
15217 /* If immediate is zero then we are a pseudo-instruction for
15218 VMOVN.I<size> <Dd>, <Qm> */
15221 inst
.operands
[2].present
= 0;
15222 inst
.instruction
= N_MNEM_vmovn
;
15227 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15228 _("immediate out of range for narrowing operation"));
15229 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15233 do_neon_shll (void)
15235 /* FIXME: Type checking when lengthening. */
15236 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15237 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15238 unsigned imm
= inst
.operands
[2].imm
;
15240 if (imm
== et
.size
)
15242 /* Maximum shift variant. */
15243 NEON_ENCODE (INTEGER
, inst
);
15244 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15245 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15246 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15247 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15248 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15250 neon_dp_fixup (&inst
);
15254 /* A more-specific type check for non-max versions. */
15255 et
= neon_check_type (2, NS_QDI
,
15256 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15257 NEON_ENCODE (IMMED
, inst
);
15258 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
15262 /* Check the various types for the VCVT instruction, and return which version
15263 the current instruction is. */
15265 #define CVT_FLAVOUR_VAR \
15266 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15267 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15268 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15269 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15270 /* Half-precision conversions. */ \
15271 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15272 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15273 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
15274 Compared with single/double precision variants, only the co-processor \
15275 field is different, so the encoding flow is reused here. */ \
15276 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
15277 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
15278 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15279 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15280 /* VFP instructions. */ \
15281 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15282 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15283 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15284 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15285 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15286 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15287 /* VFP instructions with bitshift. */ \
15288 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15289 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15290 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15291 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15292 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15293 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15294 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15295 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15297 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15298 neon_cvt_flavour_##C,
15300 /* The different types of conversions we can do. */
15301 enum neon_cvt_flavour
15304 neon_cvt_flavour_invalid
,
15305 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
15310 static enum neon_cvt_flavour
15311 get_neon_cvt_flavour (enum neon_shape rs
)
15313 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15314 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15315 if (et.type != NT_invtype) \
15317 inst.error = NULL; \
15318 return (neon_cvt_flavour_##C); \
15321 struct neon_type_el et
;
15322 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15323 || rs
== NS_FF
) ? N_VFP
: 0;
15324 /* The instruction versions which take an immediate take one register
15325 argument, which is extended to the width of the full register. Thus the
15326 "source" and "destination" registers must have the same width. Hack that
15327 here by making the size equal to the key (wider, in this case) operand. */
15328 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15332 return neon_cvt_flavour_invalid
;
15347 /* Neon-syntax VFP conversions. */
15350 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15352 const char *opname
= 0;
15354 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15355 || rs
== NS_FHI
|| rs
== NS_HFI
)
15357 /* Conversions with immediate bitshift. */
15358 const char *enc
[] =
15360 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15366 if (flavour
< (int) ARRAY_SIZE (enc
))
15368 opname
= enc
[flavour
];
15369 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15370 _("operands 0 and 1 must be the same register"));
15371 inst
.operands
[1] = inst
.operands
[2];
15372 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15377 /* Conversions without bitshift. */
15378 const char *enc
[] =
15380 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15386 if (flavour
< (int) ARRAY_SIZE (enc
))
15387 opname
= enc
[flavour
];
15391 do_vfp_nsyn_opcode (opname
);
15393 /* ARMv8.2 fp16 VCVT instruction. */
15394 if (flavour
== neon_cvt_flavour_s32_f16
15395 || flavour
== neon_cvt_flavour_u32_f16
15396 || flavour
== neon_cvt_flavour_f16_u32
15397 || flavour
== neon_cvt_flavour_f16_s32
)
15398 do_scalar_fp16_v82_encode ();
15402 do_vfp_nsyn_cvtz (void)
15404 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15405 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15406 const char *enc
[] =
15408 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15414 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15415 do_vfp_nsyn_opcode (enc
[flavour
]);
15419 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15420 enum neon_cvt_mode mode
)
15425 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15426 D register operands. */
15427 if (flavour
== neon_cvt_flavour_s32_f64
15428 || flavour
== neon_cvt_flavour_u32_f64
)
15429 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15432 if (flavour
== neon_cvt_flavour_s32_f16
15433 || flavour
== neon_cvt_flavour_u32_f16
)
15434 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15437 set_it_insn_type (OUTSIDE_IT_INSN
);
15441 case neon_cvt_flavour_s32_f64
:
15445 case neon_cvt_flavour_s32_f32
:
15449 case neon_cvt_flavour_s32_f16
:
15453 case neon_cvt_flavour_u32_f64
:
15457 case neon_cvt_flavour_u32_f32
:
15461 case neon_cvt_flavour_u32_f16
:
15466 first_error (_("invalid instruction shape"));
15472 case neon_cvt_mode_a
: rm
= 0; break;
15473 case neon_cvt_mode_n
: rm
= 1; break;
15474 case neon_cvt_mode_p
: rm
= 2; break;
15475 case neon_cvt_mode_m
: rm
= 3; break;
15476 default: first_error (_("invalid rounding mode")); return;
15479 NEON_ENCODE (FPV8
, inst
);
15480 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15481 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15482 inst
.instruction
|= sz
<< 8;
15484 /* ARMv8.2 fp16 VCVT instruction. */
15485 if (flavour
== neon_cvt_flavour_s32_f16
15486 ||flavour
== neon_cvt_flavour_u32_f16
)
15487 do_scalar_fp16_v82_encode ();
15488 inst
.instruction
|= op
<< 7;
15489 inst
.instruction
|= rm
<< 16;
15490 inst
.instruction
|= 0xf0000000;
15491 inst
.is_neon
= TRUE
;
15495 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15497 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15498 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15499 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15501 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15503 /* PR11109: Handle round-to-zero for VCVT conversions. */
15504 if (mode
== neon_cvt_mode_z
15505 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15506 && (flavour
== neon_cvt_flavour_s32_f32
15507 || flavour
== neon_cvt_flavour_u32_f32
15508 || flavour
== neon_cvt_flavour_s32_f64
15509 || flavour
== neon_cvt_flavour_u32_f64
)
15510 && (rs
== NS_FD
|| rs
== NS_FF
))
15512 do_vfp_nsyn_cvtz ();
15516 /* ARMv8.2 fp16 VCVT conversions. */
15517 if (mode
== neon_cvt_mode_z
15518 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
15519 && (flavour
== neon_cvt_flavour_s32_f16
15520 || flavour
== neon_cvt_flavour_u32_f16
)
15523 do_vfp_nsyn_cvtz ();
15524 do_scalar_fp16_v82_encode ();
15528 /* VFP rather than Neon conversions. */
15529 if (flavour
>= neon_cvt_flavour_first_fp
)
15531 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15532 do_vfp_nsyn_cvt (rs
, flavour
);
15534 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15545 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15547 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15550 /* Fixed-point conversion with #0 immediate is encoded as an
15551 integer conversion. */
15552 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15554 immbits
= 32 - inst
.operands
[2].imm
;
15555 NEON_ENCODE (IMMED
, inst
);
15556 if (flavour
!= neon_cvt_flavour_invalid
)
15557 inst
.instruction
|= enctab
[flavour
];
15558 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15559 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15560 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15561 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15562 inst
.instruction
|= neon_quad (rs
) << 6;
15563 inst
.instruction
|= 1 << 21;
15564 inst
.instruction
|= immbits
<< 16;
15566 neon_dp_fixup (&inst
);
15572 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15574 NEON_ENCODE (FLOAT
, inst
);
15575 set_it_insn_type (OUTSIDE_IT_INSN
);
15577 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15580 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15581 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15582 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15583 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15584 inst
.instruction
|= neon_quad (rs
) << 6;
15585 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15586 inst
.instruction
|= mode
<< 8;
15588 inst
.instruction
|= 0xfc000000;
15590 inst
.instruction
|= 0xf0000000;
15596 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15598 NEON_ENCODE (INTEGER
, inst
);
15600 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15603 if (flavour
!= neon_cvt_flavour_invalid
)
15604 inst
.instruction
|= enctab
[flavour
];
15606 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15607 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15608 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15609 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15610 inst
.instruction
|= neon_quad (rs
) << 6;
15611 inst
.instruction
|= 2 << 18;
15613 neon_dp_fixup (&inst
);
15618 /* Half-precision conversions for Advanced SIMD -- neon. */
15623 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15625 as_bad (_("operand size must match register width"));
15630 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15632 as_bad (_("operand size must match register width"));
15637 inst
.instruction
= 0x3b60600;
15639 inst
.instruction
= 0x3b60700;
15641 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15642 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15643 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15644 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15645 neon_dp_fixup (&inst
);
15649 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15650 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15651 do_vfp_nsyn_cvt (rs
, flavour
);
15653 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15658 do_neon_cvtr (void)
15660 do_neon_cvt_1 (neon_cvt_mode_x
);
15666 do_neon_cvt_1 (neon_cvt_mode_z
);
15670 do_neon_cvta (void)
15672 do_neon_cvt_1 (neon_cvt_mode_a
);
15676 do_neon_cvtn (void)
15678 do_neon_cvt_1 (neon_cvt_mode_n
);
15682 do_neon_cvtp (void)
15684 do_neon_cvt_1 (neon_cvt_mode_p
);
15688 do_neon_cvtm (void)
15690 do_neon_cvt_1 (neon_cvt_mode_m
);
15694 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15697 mark_feature_used (&fpu_vfp_ext_armv8
);
15699 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15700 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15701 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15702 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15703 inst
.instruction
|= to
? 0x10000 : 0;
15704 inst
.instruction
|= t
? 0x80 : 0;
15705 inst
.instruction
|= is_double
? 0x100 : 0;
15706 do_vfp_cond_or_thumb ();
15710 do_neon_cvttb_1 (bfd_boolean t
)
15712 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15713 NS_DF
, NS_DH
, NS_NULL
);
15717 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15720 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15722 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15725 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15727 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15729 /* The VCVTB and VCVTT instructions with D-register operands
15730 don't work for SP only targets. */
15731 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15735 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15737 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15739 /* The VCVTB and VCVTT instructions with D-register operands
15740 don't work for SP only targets. */
15741 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15745 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15752 do_neon_cvtb (void)
15754 do_neon_cvttb_1 (FALSE
);
15759 do_neon_cvtt (void)
15761 do_neon_cvttb_1 (TRUE
);
15765 neon_move_immediate (void)
15767 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15768 struct neon_type_el et
= neon_check_type (2, rs
,
15769 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15770 unsigned immlo
, immhi
= 0, immbits
;
15771 int op
, cmode
, float_p
;
15773 constraint (et
.type
== NT_invtype
,
15774 _("operand size must be specified for immediate VMOV"));
15776 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15777 op
= (inst
.instruction
& (1 << 5)) != 0;
15779 immlo
= inst
.operands
[1].imm
;
15780 if (inst
.operands
[1].regisimm
)
15781 immhi
= inst
.operands
[1].reg
;
15783 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15784 _("immediate has bits set outside the operand size"));
15786 float_p
= inst
.operands
[1].immisfloat
;
15788 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15789 et
.size
, et
.type
)) == FAIL
)
15791 /* Invert relevant bits only. */
15792 neon_invert_size (&immlo
, &immhi
, et
.size
);
15793 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15794 with one or the other; those cases are caught by
15795 neon_cmode_for_move_imm. */
15797 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15798 &op
, et
.size
, et
.type
)) == FAIL
)
15800 first_error (_("immediate out of range"));
15805 inst
.instruction
&= ~(1 << 5);
15806 inst
.instruction
|= op
<< 5;
15808 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15809 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15810 inst
.instruction
|= neon_quad (rs
) << 6;
15811 inst
.instruction
|= cmode
<< 8;
15813 neon_write_immbits (immbits
);
15819 if (inst
.operands
[1].isreg
)
15821 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15823 NEON_ENCODE (INTEGER
, inst
);
15824 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15825 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15826 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15827 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15828 inst
.instruction
|= neon_quad (rs
) << 6;
15832 NEON_ENCODE (IMMED
, inst
);
15833 neon_move_immediate ();
15836 neon_dp_fixup (&inst
);
15839 /* Encode instructions of form:
15841 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15842 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15845 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15847 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15848 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15849 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15850 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15851 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15852 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15853 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15854 inst
.instruction
|= neon_logbits (size
) << 20;
15856 neon_dp_fixup (&inst
);
15860 do_neon_dyadic_long (void)
15862 /* FIXME: Type checking for lengthening op. */
15863 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15864 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15865 neon_mixed_length (et
, et
.size
);
15869 do_neon_abal (void)
15871 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15872 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15873 neon_mixed_length (et
, et
.size
);
15877 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15879 if (inst
.operands
[2].isscalar
)
15881 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15882 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15883 NEON_ENCODE (SCALAR
, inst
);
15884 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15888 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15889 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15890 NEON_ENCODE (INTEGER
, inst
);
15891 neon_mixed_length (et
, et
.size
);
15896 do_neon_mac_maybe_scalar_long (void)
15898 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15902 do_neon_dyadic_wide (void)
15904 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15905 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15906 neon_mixed_length (et
, et
.size
);
15910 do_neon_dyadic_narrow (void)
15912 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15913 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15914 /* Operand sign is unimportant, and the U bit is part of the opcode,
15915 so force the operand type to integer. */
15916 et
.type
= NT_integer
;
15917 neon_mixed_length (et
, et
.size
/ 2);
15921 do_neon_mul_sat_scalar_long (void)
15923 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15927 do_neon_vmull (void)
15929 if (inst
.operands
[2].isscalar
)
15930 do_neon_mac_maybe_scalar_long ();
15933 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15934 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15936 if (et
.type
== NT_poly
)
15937 NEON_ENCODE (POLY
, inst
);
15939 NEON_ENCODE (INTEGER
, inst
);
15941 /* For polynomial encoding the U bit must be zero, and the size must
15942 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15943 obviously, as 0b10). */
15946 /* Check we're on the correct architecture. */
15947 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15949 _("Instruction form not available on this architecture.");
15954 neon_mixed_length (et
, et
.size
);
15961 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15962 struct neon_type_el et
= neon_check_type (3, rs
,
15963 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15964 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15966 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15967 _("shift out of range"));
15968 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15969 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15970 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15971 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15972 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15973 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15974 inst
.instruction
|= neon_quad (rs
) << 6;
15975 inst
.instruction
|= imm
<< 8;
15977 neon_dp_fixup (&inst
);
15983 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15984 struct neon_type_el et
= neon_check_type (2, rs
,
15985 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15986 unsigned op
= (inst
.instruction
>> 7) & 3;
15987 /* N (width of reversed regions) is encoded as part of the bitmask. We
15988 extract it here to check the elements to be reversed are smaller.
15989 Otherwise we'd get a reserved instruction. */
15990 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15991 gas_assert (elsize
!= 0);
15992 constraint (et
.size
>= elsize
,
15993 _("elements must be smaller than reversal region"));
15994 neon_two_same (neon_quad (rs
), 1, et
.size
);
16000 if (inst
.operands
[1].isscalar
)
16002 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16003 struct neon_type_el et
= neon_check_type (2, rs
,
16004 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16005 unsigned sizebits
= et
.size
>> 3;
16006 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16007 int logsize
= neon_logbits (et
.size
);
16008 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16010 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16013 NEON_ENCODE (SCALAR
, inst
);
16014 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16015 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16016 inst
.instruction
|= LOW4 (dm
);
16017 inst
.instruction
|= HI1 (dm
) << 5;
16018 inst
.instruction
|= neon_quad (rs
) << 6;
16019 inst
.instruction
|= x
<< 17;
16020 inst
.instruction
|= sizebits
<< 16;
16022 neon_dp_fixup (&inst
);
16026 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16027 struct neon_type_el et
= neon_check_type (2, rs
,
16028 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16029 /* Duplicate ARM register to lanes of vector. */
16030 NEON_ENCODE (ARMREG
, inst
);
16033 case 8: inst
.instruction
|= 0x400000; break;
16034 case 16: inst
.instruction
|= 0x000020; break;
16035 case 32: inst
.instruction
|= 0x000000; break;
16038 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16039 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16040 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16041 inst
.instruction
|= neon_quad (rs
) << 21;
16042 /* The encoding for this instruction is identical for the ARM and Thumb
16043 variants, except for the condition field. */
16044 do_vfp_cond_or_thumb ();
16048 /* VMOV has particularly many variations. It can be one of:
16049 0. VMOV<c><q> <Qd>, <Qm>
16050 1. VMOV<c><q> <Dd>, <Dm>
16051 (Register operations, which are VORR with Rm = Rn.)
16052 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16053 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16055 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16056 (ARM register to scalar.)
16057 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16058 (Two ARM registers to vector.)
16059 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16060 (Scalar to ARM register.)
16061 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16062 (Vector to two ARM registers.)
16063 8. VMOV.F32 <Sd>, <Sm>
16064 9. VMOV.F64 <Dd>, <Dm>
16065 (VFP register moves.)
16066 10. VMOV.F32 <Sd>, #imm
16067 11. VMOV.F64 <Dd>, #imm
16068 (VFP float immediate load.)
16069 12. VMOV <Rd>, <Sm>
16070 (VFP single to ARM reg.)
16071 13. VMOV <Sd>, <Rm>
16072 (ARM reg to VFP single.)
16073 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16074 (Two ARM regs to two VFP singles.)
16075 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16076 (Two VFP singles to two ARM regs.)
16078 These cases can be disambiguated using neon_select_shape, except cases 1/9
16079 and 3/11 which depend on the operand type too.
16081 All the encoded bits are hardcoded by this function.
16083 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16084 Cases 5, 7 may be used with VFPv2 and above.
16086 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16087 can specify a type where it doesn't make sense to, and is ignored). */
16092 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16093 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16094 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16095 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16096 struct neon_type_el et
;
16097 const char *ldconst
= 0;
16101 case NS_DD
: /* case 1/9. */
16102 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16103 /* It is not an error here if no type is given. */
16105 if (et
.type
== NT_float
&& et
.size
== 64)
16107 do_vfp_nsyn_opcode ("fcpyd");
16110 /* fall through. */
16112 case NS_QQ
: /* case 0/1. */
16114 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16116 /* The architecture manual I have doesn't explicitly state which
16117 value the U bit should have for register->register moves, but
16118 the equivalent VORR instruction has U = 0, so do that. */
16119 inst
.instruction
= 0x0200110;
16120 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16121 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16122 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16123 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16124 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16125 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16126 inst
.instruction
|= neon_quad (rs
) << 6;
16128 neon_dp_fixup (&inst
);
16132 case NS_DI
: /* case 3/11. */
16133 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16135 if (et
.type
== NT_float
&& et
.size
== 64)
16137 /* case 11 (fconstd). */
16138 ldconst
= "fconstd";
16139 goto encode_fconstd
;
16141 /* fall through. */
16143 case NS_QI
: /* case 2/3. */
16144 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16146 inst
.instruction
= 0x0800010;
16147 neon_move_immediate ();
16148 neon_dp_fixup (&inst
);
16151 case NS_SR
: /* case 4. */
16153 unsigned bcdebits
= 0;
16155 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16156 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16158 /* .<size> is optional here, defaulting to .32. */
16159 if (inst
.vectype
.elems
== 0
16160 && inst
.operands
[0].vectype
.type
== NT_invtype
16161 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16163 inst
.vectype
.el
[0].type
= NT_untyped
;
16164 inst
.vectype
.el
[0].size
= 32;
16165 inst
.vectype
.elems
= 1;
16168 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16169 logsize
= neon_logbits (et
.size
);
16171 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16173 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16174 && et
.size
!= 32, _(BAD_FPU
));
16175 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16176 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16180 case 8: bcdebits
= 0x8; break;
16181 case 16: bcdebits
= 0x1; break;
16182 case 32: bcdebits
= 0x0; break;
16186 bcdebits
|= x
<< logsize
;
16188 inst
.instruction
= 0xe000b10;
16189 do_vfp_cond_or_thumb ();
16190 inst
.instruction
|= LOW4 (dn
) << 16;
16191 inst
.instruction
|= HI1 (dn
) << 7;
16192 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16193 inst
.instruction
|= (bcdebits
& 3) << 5;
16194 inst
.instruction
|= (bcdebits
>> 2) << 21;
16198 case NS_DRR
: /* case 5 (fmdrr). */
16199 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16202 inst
.instruction
= 0xc400b10;
16203 do_vfp_cond_or_thumb ();
16204 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16205 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16206 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16207 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16210 case NS_RS
: /* case 6. */
16213 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16214 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16215 unsigned abcdebits
= 0;
16217 /* .<dt> is optional here, defaulting to .32. */
16218 if (inst
.vectype
.elems
== 0
16219 && inst
.operands
[0].vectype
.type
== NT_invtype
16220 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16222 inst
.vectype
.el
[0].type
= NT_untyped
;
16223 inst
.vectype
.el
[0].size
= 32;
16224 inst
.vectype
.elems
= 1;
16227 et
= neon_check_type (2, NS_NULL
,
16228 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16229 logsize
= neon_logbits (et
.size
);
16231 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16233 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16234 && et
.size
!= 32, _(BAD_FPU
));
16235 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16236 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16240 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16241 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16242 case 32: abcdebits
= 0x00; break;
16246 abcdebits
|= x
<< logsize
;
16247 inst
.instruction
= 0xe100b10;
16248 do_vfp_cond_or_thumb ();
16249 inst
.instruction
|= LOW4 (dn
) << 16;
16250 inst
.instruction
|= HI1 (dn
) << 7;
16251 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16252 inst
.instruction
|= (abcdebits
& 3) << 5;
16253 inst
.instruction
|= (abcdebits
>> 2) << 21;
16257 case NS_RRD
: /* case 7 (fmrrd). */
16258 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16261 inst
.instruction
= 0xc500b10;
16262 do_vfp_cond_or_thumb ();
16263 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16264 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16265 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16266 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16269 case NS_FF
: /* case 8 (fcpys). */
16270 do_vfp_nsyn_opcode ("fcpys");
16274 case NS_FI
: /* case 10 (fconsts). */
16275 ldconst
= "fconsts";
16277 if (is_quarter_float (inst
.operands
[1].imm
))
16279 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16280 do_vfp_nsyn_opcode (ldconst
);
16282 /* ARMv8.2 fp16 vmov.f16 instruction. */
16284 do_scalar_fp16_v82_encode ();
16287 first_error (_("immediate out of range"));
16291 case NS_RF
: /* case 12 (fmrs). */
16292 do_vfp_nsyn_opcode ("fmrs");
16293 /* ARMv8.2 fp16 vmov.f16 instruction. */
16295 do_scalar_fp16_v82_encode ();
16299 case NS_FR
: /* case 13 (fmsr). */
16300 do_vfp_nsyn_opcode ("fmsr");
16301 /* ARMv8.2 fp16 vmov.f16 instruction. */
16303 do_scalar_fp16_v82_encode ();
16306 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16307 (one of which is a list), but we have parsed four. Do some fiddling to
16308 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16310 case NS_RRFF
: /* case 14 (fmrrs). */
16311 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16312 _("VFP registers must be adjacent"));
16313 inst
.operands
[2].imm
= 2;
16314 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16315 do_vfp_nsyn_opcode ("fmrrs");
16318 case NS_FFRR
: /* case 15 (fmsrr). */
16319 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16320 _("VFP registers must be adjacent"));
16321 inst
.operands
[1] = inst
.operands
[2];
16322 inst
.operands
[2] = inst
.operands
[3];
16323 inst
.operands
[0].imm
= 2;
16324 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16325 do_vfp_nsyn_opcode ("fmsrr");
16329 /* neon_select_shape has determined that the instruction
16330 shape is wrong and has already set the error message. */
16339 do_neon_rshift_round_imm (void)
16341 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16342 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16343 int imm
= inst
.operands
[2].imm
;
16345 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16348 inst
.operands
[2].present
= 0;
16353 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16354 _("immediate out of range for shift"));
16355 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16360 do_neon_movhf (void)
16362 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16363 constraint (rs
!= NS_HH
, _("invalid suffix"));
16365 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16368 do_vfp_sp_monadic ();
16371 inst
.instruction
|= 0xf0000000;
16375 do_neon_movl (void)
16377 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16378 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16379 unsigned sizebits
= et
.size
>> 3;
16380 inst
.instruction
|= sizebits
<< 19;
16381 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16387 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16388 struct neon_type_el et
= neon_check_type (2, rs
,
16389 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16390 NEON_ENCODE (INTEGER
, inst
);
16391 neon_two_same (neon_quad (rs
), 1, et
.size
);
16395 do_neon_zip_uzp (void)
16397 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16398 struct neon_type_el et
= neon_check_type (2, rs
,
16399 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16400 if (rs
== NS_DD
&& et
.size
== 32)
16402 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16403 inst
.instruction
= N_MNEM_vtrn
;
16407 neon_two_same (neon_quad (rs
), 1, et
.size
);
16411 do_neon_sat_abs_neg (void)
16413 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16414 struct neon_type_el et
= neon_check_type (2, rs
,
16415 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16416 neon_two_same (neon_quad (rs
), 1, et
.size
);
16420 do_neon_pair_long (void)
16422 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16423 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16424 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16425 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16426 neon_two_same (neon_quad (rs
), 1, et
.size
);
16430 do_neon_recip_est (void)
16432 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16433 struct neon_type_el et
= neon_check_type (2, rs
,
16434 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
16435 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16436 neon_two_same (neon_quad (rs
), 1, et
.size
);
16442 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16443 struct neon_type_el et
= neon_check_type (2, rs
,
16444 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16445 neon_two_same (neon_quad (rs
), 1, et
.size
);
16451 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16452 struct neon_type_el et
= neon_check_type (2, rs
,
16453 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16454 neon_two_same (neon_quad (rs
), 1, et
.size
);
16460 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16461 struct neon_type_el et
= neon_check_type (2, rs
,
16462 N_EQK
| N_INT
, N_8
| N_KEY
);
16463 neon_two_same (neon_quad (rs
), 1, et
.size
);
16469 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16470 neon_two_same (neon_quad (rs
), 1, -1);
16474 do_neon_tbl_tbx (void)
16476 unsigned listlenbits
;
16477 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16479 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16481 first_error (_("bad list length for table lookup"));
16485 listlenbits
= inst
.operands
[1].imm
- 1;
16486 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16487 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16488 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16489 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16490 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16491 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16492 inst
.instruction
|= listlenbits
<< 8;
16494 neon_dp_fixup (&inst
);
16498 do_neon_ldm_stm (void)
16500 /* P, U and L bits are part of bitmask. */
16501 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16502 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16504 if (inst
.operands
[1].issingle
)
16506 do_vfp_nsyn_ldm_stm (is_dbmode
);
16510 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16511 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16513 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16514 _("register list must contain at least 1 and at most 16 "
16517 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16518 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16519 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16520 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16522 inst
.instruction
|= offsetbits
;
16524 do_vfp_cond_or_thumb ();
16528 do_neon_ldr_str (void)
16530 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16532 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16533 And is UNPREDICTABLE in thumb mode. */
16535 && inst
.operands
[1].reg
== REG_PC
16536 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16539 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16540 else if (warn_on_deprecated
)
16541 as_tsktsk (_("Use of PC here is deprecated"));
16544 if (inst
.operands
[0].issingle
)
16547 do_vfp_nsyn_opcode ("flds");
16549 do_vfp_nsyn_opcode ("fsts");
16551 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16552 if (inst
.vectype
.el
[0].size
== 16)
16553 do_scalar_fp16_v82_encode ();
16558 do_vfp_nsyn_opcode ("fldd");
16560 do_vfp_nsyn_opcode ("fstd");
16564 /* "interleave" version also handles non-interleaving register VLD1/VST1
16568 do_neon_ld_st_interleave (void)
16570 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16571 N_8
| N_16
| N_32
| N_64
);
16572 unsigned alignbits
= 0;
16574 /* The bits in this table go:
16575 0: register stride of one (0) or two (1)
16576 1,2: register list length, minus one (1, 2, 3, 4).
16577 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16578 We use -1 for invalid entries. */
16579 const int typetable
[] =
16581 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16582 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16583 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16584 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16588 if (et
.type
== NT_invtype
)
16591 if (inst
.operands
[1].immisalign
)
16592 switch (inst
.operands
[1].imm
>> 8)
16594 case 64: alignbits
= 1; break;
16596 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16597 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16598 goto bad_alignment
;
16602 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16603 goto bad_alignment
;
16608 first_error (_("bad alignment"));
16612 inst
.instruction
|= alignbits
<< 4;
16613 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16615 /* Bits [4:6] of the immediate in a list specifier encode register stride
16616 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16617 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16618 up the right value for "type" in a table based on this value and the given
16619 list style, then stick it back. */
16620 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16621 | (((inst
.instruction
>> 8) & 3) << 3);
16623 typebits
= typetable
[idx
];
16625 constraint (typebits
== -1, _("bad list type for instruction"));
16626 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16627 _("bad element type for instruction"));
16629 inst
.instruction
&= ~0xf00;
16630 inst
.instruction
|= typebits
<< 8;
16633 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16634 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16635 otherwise. The variable arguments are a list of pairs of legal (size, align)
16636 values, terminated with -1. */
16639 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
16642 int result
= FAIL
, thissize
, thisalign
;
16644 if (!inst
.operands
[1].immisalign
)
16650 va_start (ap
, do_align
);
16654 thissize
= va_arg (ap
, int);
16655 if (thissize
== -1)
16657 thisalign
= va_arg (ap
, int);
16659 if (size
== thissize
&& align
== thisalign
)
16662 while (result
!= SUCCESS
);
16666 if (result
== SUCCESS
)
16669 first_error (_("unsupported alignment for instruction"));
16675 do_neon_ld_st_lane (void)
16677 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16678 int align_good
, do_align
= 0;
16679 int logsize
= neon_logbits (et
.size
);
16680 int align
= inst
.operands
[1].imm
>> 8;
16681 int n
= (inst
.instruction
>> 8) & 3;
16682 int max_el
= 64 / et
.size
;
16684 if (et
.type
== NT_invtype
)
16687 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16688 _("bad list length"));
16689 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16690 _("scalar index out of range"));
16691 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16693 _("stride of 2 unavailable when element size is 8"));
16697 case 0: /* VLD1 / VST1. */
16698 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
16700 if (align_good
== FAIL
)
16704 unsigned alignbits
= 0;
16707 case 16: alignbits
= 0x1; break;
16708 case 32: alignbits
= 0x3; break;
16711 inst
.instruction
|= alignbits
<< 4;
16715 case 1: /* VLD2 / VST2. */
16716 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
16718 if (align_good
== FAIL
)
16721 inst
.instruction
|= 1 << 4;
16724 case 2: /* VLD3 / VST3. */
16725 constraint (inst
.operands
[1].immisalign
,
16726 _("can't use alignment with this instruction"));
16729 case 3: /* VLD4 / VST4. */
16730 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16731 16, 64, 32, 64, 32, 128, -1);
16732 if (align_good
== FAIL
)
16736 unsigned alignbits
= 0;
16739 case 8: alignbits
= 0x1; break;
16740 case 16: alignbits
= 0x1; break;
16741 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16744 inst
.instruction
|= alignbits
<< 4;
16751 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16752 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16753 inst
.instruction
|= 1 << (4 + logsize
);
16755 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16756 inst
.instruction
|= logsize
<< 10;
16759 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16762 do_neon_ld_dup (void)
16764 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16765 int align_good
, do_align
= 0;
16767 if (et
.type
== NT_invtype
)
16770 switch ((inst
.instruction
>> 8) & 3)
16772 case 0: /* VLD1. */
16773 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16774 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16775 &do_align
, 16, 16, 32, 32, -1);
16776 if (align_good
== FAIL
)
16778 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16781 case 2: inst
.instruction
|= 1 << 5; break;
16782 default: first_error (_("bad list length")); return;
16784 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16787 case 1: /* VLD2. */
16788 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16789 &do_align
, 8, 16, 16, 32, 32, 64, -1);
16790 if (align_good
== FAIL
)
16792 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16793 _("bad list length"));
16794 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16795 inst
.instruction
|= 1 << 5;
16796 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16799 case 2: /* VLD3. */
16800 constraint (inst
.operands
[1].immisalign
,
16801 _("can't use alignment with this instruction"));
16802 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16803 _("bad list length"));
16804 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16805 inst
.instruction
|= 1 << 5;
16806 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16809 case 3: /* VLD4. */
16811 int align
= inst
.operands
[1].imm
>> 8;
16812 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16813 16, 64, 32, 64, 32, 128, -1);
16814 if (align_good
== FAIL
)
16816 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16817 _("bad list length"));
16818 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16819 inst
.instruction
|= 1 << 5;
16820 if (et
.size
== 32 && align
== 128)
16821 inst
.instruction
|= 0x3 << 6;
16823 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16830 inst
.instruction
|= do_align
<< 4;
16833 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16834 apart from bits [11:4]. */
16837 do_neon_ldx_stx (void)
16839 if (inst
.operands
[1].isreg
)
16840 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16842 switch (NEON_LANE (inst
.operands
[0].imm
))
16844 case NEON_INTERLEAVE_LANES
:
16845 NEON_ENCODE (INTERLV
, inst
);
16846 do_neon_ld_st_interleave ();
16849 case NEON_ALL_LANES
:
16850 NEON_ENCODE (DUP
, inst
);
16851 if (inst
.instruction
== N_INV
)
16853 first_error ("only loads support such operands");
16860 NEON_ENCODE (LANE
, inst
);
16861 do_neon_ld_st_lane ();
16864 /* L bit comes from bit mask. */
16865 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16866 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16867 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16869 if (inst
.operands
[1].postind
)
16871 int postreg
= inst
.operands
[1].imm
& 0xf;
16872 constraint (!inst
.operands
[1].immisreg
,
16873 _("post-index must be a register"));
16874 constraint (postreg
== 0xd || postreg
== 0xf,
16875 _("bad register for post-index"));
16876 inst
.instruction
|= postreg
;
16880 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
16881 constraint (inst
.reloc
.exp
.X_op
!= O_constant
16882 || inst
.reloc
.exp
.X_add_number
!= 0,
16885 if (inst
.operands
[1].writeback
)
16887 inst
.instruction
|= 0xd;
16890 inst
.instruction
|= 0xf;
16894 inst
.instruction
|= 0xf9000000;
16896 inst
.instruction
|= 0xf4000000;
16901 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
16903 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16904 D register operands. */
16905 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16906 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16909 NEON_ENCODE (FPV8
, inst
);
16911 if (rs
== NS_FFF
|| rs
== NS_HHH
)
16913 do_vfp_sp_dyadic ();
16915 /* ARMv8.2 fp16 instruction. */
16917 do_scalar_fp16_v82_encode ();
16920 do_vfp_dp_rd_rn_rm ();
16923 inst
.instruction
|= 0x100;
16925 inst
.instruction
|= 0xf0000000;
16931 set_it_insn_type (OUTSIDE_IT_INSN
);
16933 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16934 first_error (_("invalid instruction shape"));
16940 set_it_insn_type (OUTSIDE_IT_INSN
);
16942 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16945 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16948 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
16952 do_vrint_1 (enum neon_cvt_mode mode
)
16954 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
16955 struct neon_type_el et
;
16960 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16961 D register operands. */
16962 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16963 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16966 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
16968 if (et
.type
!= NT_invtype
)
16970 /* VFP encodings. */
16971 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
16972 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
16973 set_it_insn_type (OUTSIDE_IT_INSN
);
16975 NEON_ENCODE (FPV8
, inst
);
16976 if (rs
== NS_FF
|| rs
== NS_HH
)
16977 do_vfp_sp_monadic ();
16979 do_vfp_dp_rd_rm ();
16983 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
16984 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
16985 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
16986 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
16987 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
16988 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
16989 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
16993 inst
.instruction
|= (rs
== NS_DD
) << 8;
16994 do_vfp_cond_or_thumb ();
16996 /* ARMv8.2 fp16 vrint instruction. */
16998 do_scalar_fp16_v82_encode ();
17002 /* Neon encodings (or something broken...). */
17004 et
= neon_check_type (2, rs
, N_EQK
, N_F32
| N_KEY
);
17006 if (et
.type
== NT_invtype
)
17009 set_it_insn_type (OUTSIDE_IT_INSN
);
17010 NEON_ENCODE (FLOAT
, inst
);
17012 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17015 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17016 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17017 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17018 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17019 inst
.instruction
|= neon_quad (rs
) << 6;
17022 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17023 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17024 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17025 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17026 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17027 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17028 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17033 inst
.instruction
|= 0xfc000000;
17035 inst
.instruction
|= 0xf0000000;
17042 do_vrint_1 (neon_cvt_mode_x
);
17048 do_vrint_1 (neon_cvt_mode_z
);
17054 do_vrint_1 (neon_cvt_mode_r
);
17060 do_vrint_1 (neon_cvt_mode_a
);
17066 do_vrint_1 (neon_cvt_mode_n
);
17072 do_vrint_1 (neon_cvt_mode_p
);
17078 do_vrint_1 (neon_cvt_mode_m
);
17081 /* Crypto v1 instructions. */
17083 do_crypto_2op_1 (unsigned elttype
, int op
)
17085 set_it_insn_type (OUTSIDE_IT_INSN
);
17087 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17093 NEON_ENCODE (INTEGER
, inst
);
17094 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17095 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17096 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17097 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17099 inst
.instruction
|= op
<< 6;
17102 inst
.instruction
|= 0xfc000000;
17104 inst
.instruction
|= 0xf0000000;
17108 do_crypto_3op_1 (int u
, int op
)
17110 set_it_insn_type (OUTSIDE_IT_INSN
);
17112 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17113 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17118 NEON_ENCODE (INTEGER
, inst
);
17119 neon_three_same (1, u
, 8 << op
);
17125 do_crypto_2op_1 (N_8
, 0);
17131 do_crypto_2op_1 (N_8
, 1);
17137 do_crypto_2op_1 (N_8
, 2);
17143 do_crypto_2op_1 (N_8
, 3);
17149 do_crypto_3op_1 (0, 0);
17155 do_crypto_3op_1 (0, 1);
17161 do_crypto_3op_1 (0, 2);
17167 do_crypto_3op_1 (0, 3);
17173 do_crypto_3op_1 (1, 0);
17179 do_crypto_3op_1 (1, 1);
17183 do_sha256su1 (void)
17185 do_crypto_3op_1 (1, 2);
17191 do_crypto_2op_1 (N_32
, -1);
17197 do_crypto_2op_1 (N_32
, 0);
17201 do_sha256su0 (void)
17203 do_crypto_2op_1 (N_32
, 1);
17207 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17209 unsigned int Rd
= inst
.operands
[0].reg
;
17210 unsigned int Rn
= inst
.operands
[1].reg
;
17211 unsigned int Rm
= inst
.operands
[2].reg
;
17213 set_it_insn_type (OUTSIDE_IT_INSN
);
17214 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17215 inst
.instruction
|= LOW4 (Rn
) << 16;
17216 inst
.instruction
|= LOW4 (Rm
);
17217 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17218 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17220 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17221 as_warn (UNPRED_REG ("r15"));
17222 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17223 as_warn (UNPRED_REG ("r13"));
17263 /* Overall per-instruction processing. */
17265 /* We need to be able to fix up arbitrary expressions in some statements.
17266 This is so that we can handle symbols that are an arbitrary distance from
17267 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17268 which returns part of an address in a form which will be valid for
17269 a data instruction. We do this by pushing the expression into a symbol
17270 in the expr_section, and creating a fix for that. */
17273 fix_new_arm (fragS
* frag
,
17287 /* Create an absolute valued symbol, so we have something to
17288 refer to in the object file. Unfortunately for us, gas's
17289 generic expression parsing will already have folded out
17290 any use of .set foo/.type foo %function that may have
17291 been used to set type information of the target location,
17292 that's being specified symbolically. We have to presume
17293 the user knows what they are doing. */
17297 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17299 symbol
= symbol_find_or_make (name
);
17300 S_SET_SEGMENT (symbol
, absolute_section
);
17301 symbol_set_frag (symbol
, &zero_address_frag
);
17302 S_SET_VALUE (symbol
, exp
->X_add_number
);
17303 exp
->X_op
= O_symbol
;
17304 exp
->X_add_symbol
= symbol
;
17305 exp
->X_add_number
= 0;
17311 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17312 (enum bfd_reloc_code_real
) reloc
);
17316 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17317 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17321 /* Mark whether the fix is to a THUMB instruction, or an ARM
17323 new_fix
->tc_fix_data
= thumb_mode
;
17326 /* Create a frg for an instruction requiring relaxation. */
17328 output_relax_insn (void)
17334 /* The size of the instruction is unknown, so tie the debug info to the
17335 start of the instruction. */
17336 dwarf2_emit_insn (0);
17338 switch (inst
.reloc
.exp
.X_op
)
17341 sym
= inst
.reloc
.exp
.X_add_symbol
;
17342 offset
= inst
.reloc
.exp
.X_add_number
;
17346 offset
= inst
.reloc
.exp
.X_add_number
;
17349 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17353 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17354 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17355 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17358 /* Write a 32-bit thumb instruction to buf. */
17360 put_thumb32_insn (char * buf
, unsigned long insn
)
17362 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17363 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17367 output_inst (const char * str
)
17373 as_bad ("%s -- `%s'", inst
.error
, str
);
17378 output_relax_insn ();
17381 if (inst
.size
== 0)
17384 to
= frag_more (inst
.size
);
17385 /* PR 9814: Record the thumb mode into the current frag so that we know
17386 what type of NOP padding to use, if necessary. We override any previous
17387 setting so that if the mode has changed then the NOPS that we use will
17388 match the encoding of the last instruction in the frag. */
17389 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17391 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17393 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17394 put_thumb32_insn (to
, inst
.instruction
);
17396 else if (inst
.size
> INSN_SIZE
)
17398 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17399 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17400 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17403 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17405 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17406 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17407 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17410 dwarf2_emit_insn (inst
.size
);
/* Emit an IT instruction with the given condition and mask.  If TO is
   NULL, space is allocated in the current frag; the write location is
   returned so a pending IT can later be patched in place.  */

static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17469 /* Subroutine of md_assemble, responsible for looking up the primary
17470 opcode from the mnemonic the user wrote. STR points to the
17471 beginning of the mnemonic.
17473 This is not simply a hash table lookup, because of conditional
17474 variants. Most instructions have conditional variants, which are
17475 expressed with a _conditional affix_ to the mnemonic. If we were
17476 to encode each conditional variant as a literal string in the opcode
17477 table, it would have approximately 20,000 entries.
17479 Most mnemonics take this affix as a suffix, and in unified syntax,
17480 'most' is upgraded to 'all'. However, in the divided syntax, some
17481 instructions take the affix as an infix, notably the s-variants of
17482 the arithmetic instructions. Of those instructions, all but six
17483 have the infix appear after the third character of the mnemonic.
17485 Accordingly, the algorithm for looking up primary opcodes given
17488 1. Look up the identifier in the opcode table.
17489 If we find a match, go to step U.
17491 2. Look up the last two characters of the identifier in the
17492 conditions table. If we find a match, look up the first N-2
17493 characters of the identifier in the opcode table. If we
17494 find a match, go to step CE.
17496 3. Look up the fourth and fifth characters of the identifier in
17497 the conditions table. If we find a match, extract those
17498 characters from the identifier, and look up the remaining
17499 characters in the opcode table. If we find a match, go
17504 U. Examine the tag field of the opcode structure, in case this is
17505 one of the six instructions with its conditional infix in an
17506 unusual place. If it is, the tag tells us where to find the
17507 infix; look it up in the conditions table and set inst.cond
17508 accordingly. Otherwise, this is an unconditional instruction.
17509 Again set inst.cond accordingly. Return the opcode structure.
17511 CE. Examine the tag field to make sure this is an instruction that
17512 should receive a conditional suffix. If it is not, fail.
17513 Otherwise, set inst.cond from the suffix we already looked up,
17514 and return the opcode structure.
17516 CM. Examine the tag field to make sure this is an instruction that
17517 should receive a conditional infix after the third character.
17518 If it is not, fail. Otherwise, undo the edits to the current
17519 line of input and proceed as for case CE. */
17521 static const struct asm_opcode
*
17522 opcode_lookup (char **str
)
17526 const struct asm_opcode
*opcode
;
17527 const struct asm_cond
*cond
;
17530 /* Scan up to the end of the mnemonic, which must end in white space,
17531 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17532 for (base
= end
= *str
; *end
!= '\0'; end
++)
17533 if (*end
== ' ' || *end
== '.')
17539 /* Handle a possible width suffix and/or Neon type suffix. */
17544 /* The .w and .n suffixes are only valid if the unified syntax is in
17546 if (unified_syntax
&& end
[1] == 'w')
17548 else if (unified_syntax
&& end
[1] == 'n')
17553 inst
.vectype
.elems
= 0;
17555 *str
= end
+ offset
;
17557 if (end
[offset
] == '.')
17559 /* See if we have a Neon type suffix (possible in either unified or
17560 non-unified ARM syntax mode). */
17561 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17564 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17570 /* Look for unaffixed or special-case affixed mnemonic. */
17571 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17576 if (opcode
->tag
< OT_odd_infix_0
)
17578 inst
.cond
= COND_ALWAYS
;
17582 if (warn_on_deprecated
&& unified_syntax
)
17583 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17584 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17585 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17588 inst
.cond
= cond
->value
;
17592 /* Cannot have a conditional suffix on a mnemonic of less than two
17594 if (end
- base
< 3)
17597 /* Look for suffixed mnemonic. */
17599 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17600 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17602 if (opcode
&& cond
)
17605 switch (opcode
->tag
)
17607 case OT_cinfix3_legacy
:
17608 /* Ignore conditional suffixes matched on infix only mnemonics. */
17612 case OT_cinfix3_deprecated
:
17613 case OT_odd_infix_unc
:
17614 if (!unified_syntax
)
17616 /* else fall through */
17620 case OT_csuf_or_in3
:
17621 inst
.cond
= cond
->value
;
17624 case OT_unconditional
:
17625 case OT_unconditionalF
:
17627 inst
.cond
= cond
->value
;
17630 /* Delayed diagnostic. */
17631 inst
.error
= BAD_COND
;
17632 inst
.cond
= COND_ALWAYS
;
17641 /* Cannot have a usual-position infix on a mnemonic of less than
17642 six characters (five would be a suffix). */
17643 if (end
- base
< 6)
17646 /* Look for infixed mnemonic in the usual position. */
17648 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17652 memcpy (save
, affix
, 2);
17653 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17654 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17656 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17657 memcpy (affix
, save
, 2);
17660 && (opcode
->tag
== OT_cinfix3
17661 || opcode
->tag
== OT_cinfix3_deprecated
17662 || opcode
->tag
== OT_csuf_or_in3
17663 || opcode
->tag
== OT_cinfix3_legacy
))
17666 if (warn_on_deprecated
&& unified_syntax
17667 && (opcode
->tag
== OT_cinfix3
17668 || opcode
->tag
== OT_cinfix3_deprecated
))
17669 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17671 inst
.cond
= cond
->value
;
17678 /* This function generates an initial IT instruction, leaving its block
17679 virtually open for the new instructions. Eventually,
17680 the mask will be updated by now_it_add_mask () each time
17681 a new instruction needs to be included in the IT block.
17682 Finally, the block is closed with close_automatic_it_block ().
17683 The block closure can be requested either from md_assemble (),
17684 a tencode (), or due to a label hook. */
17687 new_automatic_it_block (int cond
)
17689 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17690 now_it
.mask
= 0x18;
17692 now_it
.block_length
= 1;
17693 mapping_state (MAP_THUMB
);
17694 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17695 now_it
.warn_deprecated
= FALSE
;
17696 now_it
.insn_cond
= TRUE
;
17699 /* Close an automatic IT block.
17700 See comments in new_automatic_it_block (). */
17703 close_automatic_it_block (void)
17705 now_it
.mask
= 0x10;
17706 now_it
.block_length
= 0;
17709 /* Update the mask of the current automatically-generated IT
17710 instruction. See comments in new_automatic_it_block (). */
17713 now_it_add_mask (int cond
)
17715 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17716 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17717 | ((bitvalue) << (nbit)))
17718 const int resulting_bit
= (cond
& 1);
17720 now_it
.mask
&= 0xf;
17721 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17723 (5 - now_it
.block_length
));
17724 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17726 ((5 - now_it
.block_length
) - 1) );
17727 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17730 #undef SET_BIT_VALUE
17733 /* The IT blocks handling machinery is accessed through the these functions:
17734 it_fsm_pre_encode () from md_assemble ()
17735 set_it_insn_type () optional, from the tencode functions
17736 set_it_insn_type_last () ditto
17737 in_it_block () ditto
17738 it_fsm_post_encode () from md_assemble ()
17739 force_automatic_it_block_close () from label habdling functions
17742 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17743 initializing the IT insn type with a generic initial value depending
17744 on the inst.condition.
17745 2) During the tencode function, two things may happen:
17746 a) The tencode function overrides the IT insn type by
17747 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17748 b) The tencode function queries the IT block state by
17749 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17751 Both set_it_insn_type and in_it_block run the internal FSM state
17752 handling function (handle_it_state), because: a) setting the IT insn
17753 type may incur in an invalid state (exiting the function),
17754 and b) querying the state requires the FSM to be updated.
17755 Specifically we want to avoid creating an IT block for conditional
17756 branches, so it_fsm_pre_encode is actually a guess and we can't
17757 determine whether an IT block is required until the tencode () routine
17758 has decided what type of instruction this actually it.
17759 Because of this, if set_it_insn_type and in_it_block have to be used,
17760 set_it_insn_type has to be called first.
17762 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17763 determines the insn IT type depending on the inst.cond code.
17764 When a tencode () routine encodes an instruction that can be
17765 either outside an IT block, or, in the case of being inside, has to be
17766 the last one, set_it_insn_type_last () will determine the proper
17767 IT instruction type based on the inst.cond code. Otherwise,
17768 set_it_insn_type can be called for overriding that logic or
17769 for covering other cases.
17771 Calling handle_it_state () may not transition the IT block state to
17772 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
17773 still queried. Instead, if the FSM determines that the state should
17774 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17775 after the tencode () function: that's what it_fsm_post_encode () does.
17777 Since in_it_block () calls the state handling function to get an
17778 updated state, an error may occur (due to invalid insns combination).
17779 In that case, inst.error is set.
17780 Therefore, inst.error has to be checked after the execution of
17781 the tencode () routine.
17783 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17784 any pending state change (if any) that didn't take place in
17785 handle_it_state () as explained above. */
17788 it_fsm_pre_encode (void)
17790 if (inst
.cond
!= COND_ALWAYS
)
17791 inst
.it_insn_type
= INSIDE_IT_INSN
;
17793 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17795 now_it
.state_handled
= 0;
17798 /* IT state FSM handling function. */
17801 handle_it_state (void)
17803 now_it
.state_handled
= 1;
17804 now_it
.insn_cond
= FALSE
;
17806 switch (now_it
.state
)
17808 case OUTSIDE_IT_BLOCK
:
17809 switch (inst
.it_insn_type
)
17811 case OUTSIDE_IT_INSN
:
17814 case INSIDE_IT_INSN
:
17815 case INSIDE_IT_LAST_INSN
:
17816 if (thumb_mode
== 0)
17819 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17820 as_tsktsk (_("Warning: conditional outside an IT block"\
17825 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17826 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17828 /* Automatically generate the IT instruction. */
17829 new_automatic_it_block (inst
.cond
);
17830 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17831 close_automatic_it_block ();
17835 inst
.error
= BAD_OUT_IT
;
17841 case IF_INSIDE_IT_LAST_INSN
:
17842 case NEUTRAL_IT_INSN
:
17846 now_it
.state
= MANUAL_IT_BLOCK
;
17847 now_it
.block_length
= 0;
17852 case AUTOMATIC_IT_BLOCK
:
17853 /* Three things may happen now:
17854 a) We should increment current it block size;
17855 b) We should close current it block (closing insn or 4 insns);
17856 c) We should close current it block and start a new one (due
17857 to incompatible conditions or
17858 4 insns-length block reached). */
17860 switch (inst
.it_insn_type
)
17862 case OUTSIDE_IT_INSN
:
17863 /* The closure of the block shall happen immediatelly,
17864 so any in_it_block () call reports the block as closed. */
17865 force_automatic_it_block_close ();
17868 case INSIDE_IT_INSN
:
17869 case INSIDE_IT_LAST_INSN
:
17870 case IF_INSIDE_IT_LAST_INSN
:
17871 now_it
.block_length
++;
17873 if (now_it
.block_length
> 4
17874 || !now_it_compatible (inst
.cond
))
17876 force_automatic_it_block_close ();
17877 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17878 new_automatic_it_block (inst
.cond
);
17882 now_it
.insn_cond
= TRUE
;
17883 now_it_add_mask (inst
.cond
);
17886 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17887 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17888 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17889 close_automatic_it_block ();
17892 case NEUTRAL_IT_INSN
:
17893 now_it
.block_length
++;
17894 now_it
.insn_cond
= TRUE
;
17896 if (now_it
.block_length
> 4)
17897 force_automatic_it_block_close ();
17899 now_it_add_mask (now_it
.cc
& 1);
17903 close_automatic_it_block ();
17904 now_it
.state
= MANUAL_IT_BLOCK
;
17909 case MANUAL_IT_BLOCK
:
17911 /* Check conditional suffixes. */
17912 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17915 now_it
.mask
&= 0x1f;
17916 is_last
= (now_it
.mask
== 0x10);
17917 now_it
.insn_cond
= TRUE
;
17919 switch (inst
.it_insn_type
)
17921 case OUTSIDE_IT_INSN
:
17922 inst
.error
= BAD_NOT_IT
;
17925 case INSIDE_IT_INSN
:
17926 if (cond
!= inst
.cond
)
17928 inst
.error
= BAD_IT_COND
;
17933 case INSIDE_IT_LAST_INSN
:
17934 case IF_INSIDE_IT_LAST_INSN
:
17935 if (cond
!= inst
.cond
)
17937 inst
.error
= BAD_IT_COND
;
17942 inst
.error
= BAD_BRANCH
;
17947 case NEUTRAL_IT_INSN
:
17948 /* The BKPT instruction is unconditional even in an IT block. */
17952 inst
.error
= BAD_IT_IT
;
17962 struct depr_insn_mask
17964 unsigned long pattern
;
17965 unsigned long mask
;
17966 const char* description
;
17969 /* List of 16-bit instruction patterns deprecated in an IT block in
17971 static const struct depr_insn_mask depr_it_insns
[] = {
17972 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17973 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17974 { 0xa000, 0xb800, N_("ADR") },
17975 { 0x4800, 0xf800, N_("Literal loads") },
17976 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17977 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17978 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17979 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17980 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17985 it_fsm_post_encode (void)
17989 if (!now_it
.state_handled
)
17990 handle_it_state ();
17992 if (now_it
.insn_cond
17993 && !now_it
.warn_deprecated
17994 && warn_on_deprecated
17995 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17997 if (inst
.instruction
>= 0x10000)
17999 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18000 "deprecated in ARMv8"));
18001 now_it
.warn_deprecated
= TRUE
;
18005 const struct depr_insn_mask
*p
= depr_it_insns
;
18007 while (p
->mask
!= 0)
18009 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18011 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18012 "of the following class are deprecated in ARMv8: "
18013 "%s"), p
->description
);
18014 now_it
.warn_deprecated
= TRUE
;
18022 if (now_it
.block_length
> 1)
18024 as_tsktsk (_("IT blocks containing more than one conditional "
18025 "instruction are deprecated in ARMv8"));
18026 now_it
.warn_deprecated
= TRUE
;
18030 is_last
= (now_it
.mask
== 0x10);
18033 now_it
.state
= OUTSIDE_IT_BLOCK
;
18039 force_automatic_it_block_close (void)
18041 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18043 close_automatic_it_block ();
18044 now_it
.state
= OUTSIDE_IT_BLOCK
;
18052 if (!now_it
.state_handled
)
18053 handle_it_state ();
18055 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18058 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18059 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18060 here, hence the "known" in the function name. */
18063 known_t32_only_insn (const struct asm_opcode
*opcode
)
18065 /* Original Thumb-1 wide instruction. */
18066 if (opcode
->tencode
== do_t_blx
18067 || opcode
->tencode
== do_t_branch23
18068 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18069 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18072 /* Wide-only instruction added to ARMv8-M. */
18073 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m
)
18074 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18075 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18076 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18082 /* Whether wide instruction variant can be used if available for a valid OPCODE
18086 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18088 if (known_t32_only_insn (opcode
))
18091 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18092 of variant T3 of B.W is checked in do_t_branch. */
18093 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18094 && opcode
->tencode
== do_t_branch
)
18097 /* Wide instruction variants of all instructions with narrow *and* wide
18098 variants become available with ARMv6t2. Other opcodes are either
18099 narrow-only or wide-only and are thus available if OPCODE is valid. */
18100 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
18103 /* OPCODE with narrow only instruction variant or wide variant not
18109 md_assemble (char *str
)
18112 const struct asm_opcode
* opcode
;
18114 /* Align the previous label if needed. */
18115 if (last_label_seen
!= NULL
)
18117 symbol_set_frag (last_label_seen
, frag_now
);
18118 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18119 S_SET_SEGMENT (last_label_seen
, now_seg
);
18122 memset (&inst
, '\0', sizeof (inst
));
18123 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18125 opcode
= opcode_lookup (&p
);
18128 /* It wasn't an instruction, but it might be a register alias of
18129 the form alias .req reg, or a Neon .dn/.qn directive. */
18130 if (! create_register_alias (str
, p
)
18131 && ! create_neon_reg_alias (str
, p
))
18132 as_bad (_("bad instruction `%s'"), str
);
18137 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18138 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18140 /* The value which unconditional instructions should have in place of the
18141 condition field. */
18142 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
18146 arm_feature_set variant
;
18148 variant
= cpu_variant
;
18149 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18150 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18151 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18152 /* Check that this instruction is supported for this CPU. */
18153 if (!opcode
->tvariant
18154 || (thumb_mode
== 1
18155 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18157 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
18160 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18161 && opcode
->tencode
!= do_t_branch
)
18163 as_bad (_("Thumb does not support conditional execution"));
18167 /* Two things are addressed here:
18168 1) Implicit require narrow instructions on Thumb-1.
18169 This avoids relaxation accidentally introducing Thumb-2
18171 2) Reject wide instructions in non Thumb-2 cores.
18173 Only instructions with narrow and wide variants need to be handled
18174 but selecting all non wide-only instructions is easier. */
18175 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18176 && !t32_insn_ok (variant
, opcode
))
18178 if (inst
.size_req
== 0)
18180 else if (inst
.size_req
== 4)
18182 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18183 as_bad (_("selected processor does not support 32bit wide "
18184 "variant of instruction `%s'"), str
);
18186 as_bad (_("selected processor does not support `%s' in "
18187 "Thumb-2 mode"), str
);
18192 inst
.instruction
= opcode
->tvalue
;
18194 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18196 /* Prepare the it_insn_type for those encodings that don't set
18198 it_fsm_pre_encode ();
18200 opcode
->tencode ();
18202 it_fsm_post_encode ();
18205 if (!(inst
.error
|| inst
.relax
))
18207 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18208 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18209 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18211 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18216 /* Something has gone badly wrong if we try to relax a fixed size
18218 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18220 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18221 *opcode
->tvariant
);
18222 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18223 set those bits when Thumb-2 32-bit instructions are seen. The impact
18224 of relaxable instructions will be considered later after we finish all
18226 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18227 variant
= arm_arch_none
;
18229 variant
= cpu_variant
;
18230 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18231 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18234 check_neon_suffixes
;
18238 mapping_state (MAP_THUMB
);
18241 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18245 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18246 is_bx
= (opcode
->aencode
== do_bx
);
18248 /* Check that this instruction is supported for this CPU. */
18249 if (!(is_bx
&& fix_v4bx
)
18250 && !(opcode
->avariant
&&
18251 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18253 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18258 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18262 inst
.instruction
= opcode
->avalue
;
18263 if (opcode
->tag
== OT_unconditionalF
)
18264 inst
.instruction
|= 0xFU
<< 28;
18266 inst
.instruction
|= inst
.cond
<< 28;
18267 inst
.size
= INSN_SIZE
;
18268 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18270 it_fsm_pre_encode ();
18271 opcode
->aencode ();
18272 it_fsm_post_encode ();
18274 /* Arm mode bx is marked as both v4T and v5 because it's still required
18275 on a hypothetical non-thumb v5 core. */
18277 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18279 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18280 *opcode
->avariant
);
18282 check_neon_suffixes
;
18286 mapping_state (MAP_ARM
);
18291 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18299 check_it_blocks_finished (void)
18304 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18305 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18306 == MANUAL_IT_BLOCK
)
18308 as_warn (_("section '%s' finished with an open IT block."),
18312 if (now_it
.state
== MANUAL_IT_BLOCK
)
18313 as_warn (_("file finished with an open IT block."));
18317 /* Various frobbings of labels and their addresses. */
18320 arm_start_line_hook (void)
18322 last_label_seen
= NULL
;
18326 arm_frob_label (symbolS
* sym
)
18328 last_label_seen
= sym
;
18330 ARM_SET_THUMB (sym
, thumb_mode
);
18332 #if defined OBJ_COFF || defined OBJ_ELF
18333 ARM_SET_INTERWORK (sym
, support_interwork
);
18336 force_automatic_it_block_close ();
18338 /* Note - do not allow local symbols (.Lxxx) to be labelled
18339 as Thumb functions. This is because these labels, whilst
18340 they exist inside Thumb code, are not the entry points for
18341 possible ARM->Thumb calls. Also, these labels can be used
18342 as part of a computed goto or switch statement. eg gcc
18343 can generate code that looks like this:
18345 ldr r2, [pc, .Laaa]
18355 The first instruction loads the address of the jump table.
18356 The second instruction converts a table index into a byte offset.
18357 The third instruction gets the jump address out of the table.
18358 The fourth instruction performs the jump.
18360 If the address stored at .Laaa is that of a symbol which has the
18361 Thumb_Func bit set, then the linker will arrange for this address
18362 to have the bottom bit set, which in turn would mean that the
18363 address computation performed by the third instruction would end
18364 up with the bottom bit set. Since the ARM is capable of unaligned
18365 word loads, the instruction would then load the incorrect address
18366 out of the jump table, and chaos would ensue. */
18367 if (label_is_thumb_function_name
18368 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18369 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18371 /* When the address of a Thumb function is taken the bottom
18372 bit of that address should be set. This will allow
18373 interworking between Arm and Thumb functions to work
18376 THUMB_SET_FUNC (sym
, 1);
18378 label_is_thumb_function_name
= FALSE
;
18381 dwarf2_emit_label (sym
);
18385 arm_data_in_code (void)
18387 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18389 *input_line_pointer
= '/';
18390 input_line_pointer
+= 5;
18391 *input_line_pointer
= 0;
18399 arm_canonicalize_symbol_name (char * name
)
18403 if (thumb_mode
&& (len
= strlen (name
)) > 5
18404 && streq (name
+ len
- 5, "/data"))
18405 *(name
+ len
- 5) = 0;
18410 /* Table of all register names defined by default. The user can
18411 define additional names with .req. Note that all register names
18412 should appear in both upper and lowercase variants. Some registers
18413 also have mixed-case names. */
18415 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18416 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18417 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18418 #define REGSET(p,t) \
18419 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18420 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18421 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18422 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18423 #define REGSETH(p,t) \
18424 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18425 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18426 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18427 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18428 #define REGSET2(p,t) \
18429 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18430 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18431 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18432 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18433 #define SPLRBANK(base,bank,t) \
18434 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18435 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18436 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18437 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18438 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18439 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18441 static const struct reg_entry reg_names
[] =
18443 /* ARM integer registers. */
18444 REGSET(r
, RN
), REGSET(R
, RN
),
18446 /* ATPCS synonyms. */
18447 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18448 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18449 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18451 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18452 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18453 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18455 /* Well-known aliases. */
18456 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18457 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18459 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18460 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18462 /* Coprocessor numbers. */
18463 REGSET(p
, CP
), REGSET(P
, CP
),
18465 /* Coprocessor register numbers. The "cr" variants are for backward
18467 REGSET(c
, CN
), REGSET(C
, CN
),
18468 REGSET(cr
, CN
), REGSET(CR
, CN
),
18470 /* ARM banked registers. */
18471 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18472 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18473 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18474 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18475 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18476 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18477 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18479 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18480 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18481 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18482 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18483 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18484 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18485 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18486 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18488 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18489 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18490 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18491 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18492 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18493 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18494 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18495 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18496 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18498 /* FPA registers. */
18499 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18500 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18502 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18503 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18505 /* VFP SP registers. */
18506 REGSET(s
,VFS
), REGSET(S
,VFS
),
18507 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18509 /* VFP DP Registers. */
18510 REGSET(d
,VFD
), REGSET(D
,VFD
),
18511 /* Extra Neon DP registers. */
18512 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18514 /* Neon QP registers. */
18515 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18517 /* VFP control registers. */
18518 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18519 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18520 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18521 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18522 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18523 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18525 /* Maverick DSP coprocessor registers. */
18526 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18527 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18529 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18530 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18531 REGDEF(dspsc
,0,DSPSC
),
18533 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18534 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18535 REGDEF(DSPSC
,0,DSPSC
),
18537 /* iWMMXt data registers - p0, c0-15. */
18538 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18540 /* iWMMXt control registers - p1, c0-3. */
18541 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18542 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18543 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18544 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18546 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18547 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18548 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18549 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18550 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18552 /* XScale accumulator registers. */
18553 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18559 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18560 within psr_required_here. */
18561 static const struct asm_psr psrs
[] =
18563 /* Backward compatibility notation. Note that "all" is no longer
18564 truly all possible PSR bits. */
18565 {"all", PSR_c
| PSR_f
},
18569 /* Individual flags. */
18575 /* Combinations of flags. */
18576 {"fs", PSR_f
| PSR_s
},
18577 {"fx", PSR_f
| PSR_x
},
18578 {"fc", PSR_f
| PSR_c
},
18579 {"sf", PSR_s
| PSR_f
},
18580 {"sx", PSR_s
| PSR_x
},
18581 {"sc", PSR_s
| PSR_c
},
18582 {"xf", PSR_x
| PSR_f
},
18583 {"xs", PSR_x
| PSR_s
},
18584 {"xc", PSR_x
| PSR_c
},
18585 {"cf", PSR_c
| PSR_f
},
18586 {"cs", PSR_c
| PSR_s
},
18587 {"cx", PSR_c
| PSR_x
},
18588 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18589 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18590 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18591 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18592 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18593 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18594 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18595 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18596 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18597 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18598 {"scf", PSR_s
| PSR_c
| PSR_f
},
18599 {"scx", PSR_s
| PSR_c
| PSR_x
},
18600 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18601 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18602 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18603 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18604 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18605 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18606 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18607 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18608 {"csf", PSR_c
| PSR_s
| PSR_f
},
18609 {"csx", PSR_c
| PSR_s
| PSR_x
},
18610 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18611 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18612 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18613 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18614 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18615 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18616 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18617 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18618 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18619 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18620 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18621 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18622 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18623 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18624 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18625 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18626 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18627 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18628 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18629 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18630 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18631 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18632 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18633 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18634 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18635 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18638 /* Table of V7M psr names. */
18639 static const struct asm_psr v7m_psrs
[] =
18641 {"apsr", 0 }, {"APSR", 0 },
18642 {"iapsr", 1 }, {"IAPSR", 1 },
18643 {"eapsr", 2 }, {"EAPSR", 2 },
18644 {"psr", 3 }, {"PSR", 3 },
18645 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18646 {"ipsr", 5 }, {"IPSR", 5 },
18647 {"epsr", 6 }, {"EPSR", 6 },
18648 {"iepsr", 7 }, {"IEPSR", 7 },
18649 {"msp", 8 }, {"MSP", 8 },
18650 {"psp", 9 }, {"PSP", 9 },
18651 {"primask", 16}, {"PRIMASK", 16},
18652 {"basepri", 17}, {"BASEPRI", 17},
18653 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18654 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18655 {"faultmask", 19}, {"FAULTMASK", 19},
18656 {"control", 20}, {"CONTROL", 20}
18659 /* Table of all shift-in-operand names. */
18660 static const struct asm_shift_name shift_names
[] =
18662 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18663 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18664 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18665 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18666 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18667 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18670 /* Table of all explicit relocation names. */
18672 static struct reloc_entry reloc_names
[] =
18674 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18675 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18676 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18677 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18678 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18679 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18680 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18681 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18682 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18683 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18684 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18685 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18686 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18687 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18688 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18689 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18690 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18691 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18695 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18696 static const struct asm_cond conds
[] =
18700 {"cs", 0x2}, {"hs", 0x2},
18701 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* Expand one barrier-option name into both its lower-case (L) and
   upper-case (U) table entries, sharing the same option encoding
   (CODE) and required architecture feature (FEAT).  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18719 static struct asm_barrier_opt barrier_opt_names
[] =
18721 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18722 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18723 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18724 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18725 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18726 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18727 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18728 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18729 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18730 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18731 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18732 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18733 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18734 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18735 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18736 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _,
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM below: build one entry with condition M2 glued into the
   middle of the mnemonic (M1 cond M3).  sizeof (#m2) == 1 detects the
   empty-condition case.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand a condition-in-the-middle mnemonic into entries for every
   condition code (plus the bare, unconditional spelling).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   variants.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18918 static const struct asm_opcode insns
[] =
18920 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18921 #define THUMB_VARIANT & arm_ext_v4t
18922 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18923 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18924 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18925 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18926 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18927 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18928 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18929 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18930 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18931 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18932 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18933 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18934 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18935 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18936 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18937 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18939 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18940 for setting PSR flag bits. They are obsolete in V6 and do not
18941 have Thumb equivalents. */
18942 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18943 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18944 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18945 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18946 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18947 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18948 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18949 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18950 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18952 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18953 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
18954 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18955 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18957 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18958 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18959 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18961 OP_ADDRGLDR
),ldst
, t_ldst
),
18962 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18964 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18965 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18966 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18967 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18968 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18969 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18971 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18972 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18973 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18974 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18977 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18978 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18979 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18980 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18982 /* Thumb-compatibility pseudo ops. */
18983 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18984 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18985 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18986 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18987 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18988 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18989 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18990 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18991 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18992 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18993 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18994 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18996 /* These may simplify to neg. */
18997 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18998 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19000 #undef THUMB_VARIANT
19001 #define THUMB_VARIANT & arm_ext_v6
19003 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19005 /* V1 instructions with no Thumb analogue prior to V6T2. */
19006 #undef THUMB_VARIANT
19007 #define THUMB_VARIANT & arm_ext_v6t2
19009 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19010 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19011 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19013 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19014 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19015 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19016 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19018 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19019 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19021 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19022 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19024 /* V1 instructions with no Thumb analogue at all. */
19025 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19026 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19028 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19029 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19030 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19031 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19032 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19033 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19034 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19035 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19038 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19039 #undef THUMB_VARIANT
19040 #define THUMB_VARIANT & arm_ext_v4t
19042 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19043 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19045 #undef THUMB_VARIANT
19046 #define THUMB_VARIANT & arm_ext_v6t2
19048 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19049 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19051 /* Generic coprocessor instructions. */
19052 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19053 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19054 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19055 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19056 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19057 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19058 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19061 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19063 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19064 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19067 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19068 #undef THUMB_VARIANT
19069 #define THUMB_VARIANT & arm_ext_msr
19071 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19072 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19075 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19076 #undef THUMB_VARIANT
19077 #define THUMB_VARIANT & arm_ext_v6t2
19079 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19080 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19081 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19082 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19083 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19084 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19085 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19086 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19089 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19090 #undef THUMB_VARIANT
19091 #define THUMB_VARIANT & arm_ext_v4t
19093 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19094 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19095 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19096 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19097 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19098 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19101 #define ARM_VARIANT & arm_ext_v4t_5
19103 /* ARM Architecture 4T. */
19104 /* Note: bx (and blx) are required on V5, even if the processor does
19105 not support Thumb. */
19106 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19109 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19110 #undef THUMB_VARIANT
19111 #define THUMB_VARIANT & arm_ext_v5t
19113 /* Note: blx has 2 variants; the .value coded here is for
19114 BLX(2). Only this variant has conditional execution. */
19115 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19116 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19118 #undef THUMB_VARIANT
19119 #define THUMB_VARIANT & arm_ext_v6t2
19121 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19122 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19123 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19124 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19125 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19126 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19127 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19128 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19131 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19132 #undef THUMB_VARIANT
19133 #define THUMB_VARIANT & arm_ext_v5exp
19135 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19136 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19137 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19138 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19140 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19141 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19143 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19144 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19145 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19146 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19148 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19149 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19150 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19151 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19153 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19154 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19156 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19157 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19158 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19159 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19162 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19163 #undef THUMB_VARIANT
19164 #define THUMB_VARIANT & arm_ext_v6t2
19166 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19167 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19169 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19170 ADDRGLDRS
), ldrd
, t_ldstd
),
19172 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19173 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19176 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19178 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19181 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19182 #undef THUMB_VARIANT
19183 #define THUMB_VARIANT & arm_ext_v6
19185 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19186 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19187 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19188 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19189 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19190 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19191 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19192 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19193 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19194 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19196 #undef THUMB_VARIANT
19197 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19199 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19200 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19202 #undef THUMB_VARIANT
19203 #define THUMB_VARIANT & arm_ext_v6t2
19205 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19206 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19208 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19209 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19211 /* ARM V6 not included in V7M. */
19212 #undef THUMB_VARIANT
19213 #define THUMB_VARIANT & arm_ext_v6_notm
19214 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19215 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19216 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19217 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19218 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19219 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19220 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19221 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19222 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19223 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19224 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19225 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19226 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19227 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19228 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19229 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19230 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19231 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19232 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19234 /* ARM V6 not included in V7M (eg. integer SIMD). */
19235 #undef THUMB_VARIANT
19236 #define THUMB_VARIANT & arm_ext_v6_dsp
19237 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19238 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19239 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19240 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19241 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19242 /* Old name for QASX. */
19243 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19244 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19245 /* Old name for QSAX. */
19246 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19247 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19248 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19249 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19250 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19251 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19252 /* Old name for SASX. */
19253 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19254 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19255 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19256 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19257 /* Old name for SHASX. */
19258 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19259 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19260 /* Old name for SHSAX. */
19261 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19262 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19263 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19264 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19265 /* Old name for SSAX. */
19266 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19267 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19268 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19269 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19270 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19271 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19272 /* Old name for UASX. */
19273 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19274 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19275 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19276 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19277 /* Old name for UHASX. */
19278 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19279 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19280 /* Old name for UHSAX. */
19281 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19282 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19283 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19284 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19285 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19286 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19287 /* Old name for UQASX. */
19288 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19289 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19290 /* Old name for UQSAX. */
19291 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19292 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19293 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19294 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19295 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19296 /* Old name for USAX. */
19297 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19298 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19299 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19300 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19301 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19302 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19303 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19304 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19305 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19306 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19307 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19308 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19309 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19310 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19311 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19312 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19313 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19314 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19315 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19316 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19317 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19318 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19319 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19320 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19321 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19322 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19323 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19324 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19325 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19326 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19327 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19328 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19329 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19330 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19333 #define ARM_VARIANT & arm_ext_v6k
19334 #undef THUMB_VARIANT
19335 #define THUMB_VARIANT & arm_ext_v6k
19337 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19338 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19339 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19340 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19342 #undef THUMB_VARIANT
19343 #define THUMB_VARIANT & arm_ext_v6_notm
19344 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19346 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19347 RRnpcb
), strexd
, t_strexd
),
19349 #undef THUMB_VARIANT
19350 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19351 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19353 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19355 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19357 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19359 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19362 #define ARM_VARIANT & arm_ext_sec
19363 #undef THUMB_VARIANT
19364 #define THUMB_VARIANT & arm_ext_sec
19366 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19369 #define ARM_VARIANT & arm_ext_virt
19370 #undef THUMB_VARIANT
19371 #define THUMB_VARIANT & arm_ext_virt
19373 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19374 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19377 #define ARM_VARIANT & arm_ext_pan
19378 #undef THUMB_VARIANT
19379 #define THUMB_VARIANT & arm_ext_pan
19381 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19384 #define ARM_VARIANT & arm_ext_v6t2
19385 #undef THUMB_VARIANT
19386 #define THUMB_VARIANT & arm_ext_v6t2
19388 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19389 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19390 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19391 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19393 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19394 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19396 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19397 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19398 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19399 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19401 #undef THUMB_VARIANT
19402 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19403 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19404 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19406 /* Thumb-only instructions. */
19408 #define ARM_VARIANT NULL
19409 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19410 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19412 /* ARM does not really have an IT instruction, so always allow it.
19413 The opcode is copied from Thumb in order to allow warnings in
19414 -mimplicit-it=[never | arm] modes. */
19416 #define ARM_VARIANT & arm_ext_v1
19417 #undef THUMB_VARIANT
19418 #define THUMB_VARIANT & arm_ext_v6t2
19420 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19421 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19422 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19423 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19424 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19425 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19426 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19427 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19428 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19429 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19430 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19431 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19432 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19433 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19434 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19435 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19436 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19437 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19439 /* Thumb2 only instructions. */
19441 #define ARM_VARIANT NULL
19443 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19444 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19445 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19446 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19447 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19448 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19450 /* Hardware division instructions. */
19452 #define ARM_VARIANT & arm_ext_adiv
19453 #undef THUMB_VARIANT
19454 #define THUMB_VARIANT & arm_ext_div
19456 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19457 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19459 /* ARM V6M/V7 instructions. */
19461 #define ARM_VARIANT & arm_ext_barrier
19462 #undef THUMB_VARIANT
19463 #define THUMB_VARIANT & arm_ext_barrier
19465 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19466 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19467 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19469 /* ARM V7 instructions. */
19471 #define ARM_VARIANT & arm_ext_v7
19472 #undef THUMB_VARIANT
19473 #define THUMB_VARIANT & arm_ext_v7
19475 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19476 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19479 #define ARM_VARIANT & arm_ext_mp
19480 #undef THUMB_VARIANT
19481 #define THUMB_VARIANT & arm_ext_mp
19483 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19485 /* AArchv8 instructions. */
19487 #define ARM_VARIANT & arm_ext_v8
19489 /* Instructions shared between armv8-a and armv8-m. */
19490 #undef THUMB_VARIANT
19491 #define THUMB_VARIANT & arm_ext_atomics
19493 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19494 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19495 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19496 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19497 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19498 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19499 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19500 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19501 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19502 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19504 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19506 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19508 #undef THUMB_VARIANT
19509 #define THUMB_VARIANT & arm_ext_v8
19511 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19512 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19513 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19515 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19517 /* ARMv8 T32 only. */
19519 #define ARM_VARIANT NULL
19520 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19521 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19522 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19524 /* FP for ARMv8. */
19526 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19527 #undef THUMB_VARIANT
19528 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19530 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19531 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19532 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19533 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19534 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19535 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19536 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19537 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19538 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19539 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19540 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19541 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19542 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19543 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19544 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19545 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19546 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19548 /* Crypto v1 extensions. */
19550 #define ARM_VARIANT & fpu_crypto_ext_armv8
19551 #undef THUMB_VARIANT
19552 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19554 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19555 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19556 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19557 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19558 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19559 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19560 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19561 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19562 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19563 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19564 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19565 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19566 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19567 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19570 #define ARM_VARIANT & crc_ext_armv8
19571 #undef THUMB_VARIANT
19572 #define THUMB_VARIANT & crc_ext_armv8
19573 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19574 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19575 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19576 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19577 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19578 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19580 /* ARMv8.2 RAS extension. */
19582 #define ARM_VARIANT & arm_ext_v8_2
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v8_2
19585 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19588 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19589 #undef THUMB_VARIANT
19590 #define THUMB_VARIANT NULL
19592 cCE("wfs", e200110
, 1, (RR
), rd
),
19593 cCE("rfs", e300110
, 1, (RR
), rd
),
19594 cCE("wfc", e400110
, 1, (RR
), rd
),
19595 cCE("rfc", e500110
, 1, (RR
), rd
),
19597 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19598 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19599 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19600 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19602 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19603 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19604 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19605 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19607 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19608 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19609 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19610 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19611 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19612 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19613 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19614 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19615 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19616 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19617 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19618 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19620 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19621 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19622 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19623 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19624 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19625 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19626 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19627 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19628 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19629 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19630 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19631 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19633 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19634 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19635 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19636 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19637 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19638 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19639 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19640 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19641 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19642 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19643 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19644 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19646 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19647 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19648 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19649 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19650 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19651 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19652 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19653 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19654 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19655 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19656 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19657 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19659 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19660 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19661 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19662 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19663 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19664 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19665 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19666 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19667 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19668 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19669 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19670 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19672 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19673 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19674 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19675 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19676 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19677 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19678 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19679 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19680 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19681 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19682 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19683 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19685 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19686 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19687 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19688 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19689 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19690 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19691 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19692 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19693 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19694 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19695 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19696 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19698 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19699 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19700 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19701 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19702 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19703 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19704 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19705 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19706 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19707 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19708 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19709 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19711 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19712 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19713 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19714 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19715 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19716 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19717 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19718 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19719 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19720 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19721 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19722 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19724 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19725 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19726 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19727 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19728 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19729 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19730 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19731 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19732 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19733 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19734 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19735 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19737 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19738 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19739 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19740 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19741 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19742 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19743 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19744 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19745 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19746 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19747 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19748 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19750 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19751 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19752 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19753 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19754 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19755 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19756 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19757 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19758 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19759 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19760 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19761 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19763 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19764 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19765 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19766 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19767 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19768 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19769 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19770 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19771 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19772 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19773 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19774 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19776 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19777 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19778 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19779 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19780 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19781 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19782 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19783 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19784 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19785 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19786 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19787 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19789 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19790 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19791 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19792 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19793 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19794 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19795 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19796 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19797 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19798 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19799 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19800 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19802 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19803 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19804 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19805 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19806 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19807 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19808 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19809 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19810 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19811 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19812 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19813 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19815 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19816 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19817 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19818 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19819 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19820 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19821 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19822 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19823 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19824 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19825 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19826 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19828 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19829 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19830 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19831 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19832 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19833 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19834 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19835 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19836 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19837 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19838 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19839 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19841 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19842 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19843 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19844 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19845 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19846 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19847 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19848 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19849 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19850 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19851 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19852 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19854 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19855 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19856 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19857 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19858 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19859 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19860 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19861 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19862 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19863 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19864 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19865 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19867 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19868 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19869 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19870 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19871 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19872 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19873 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19874 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19875 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19876 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19877 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19878 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19880 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19881 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19882 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19883 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19884 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19885 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19886 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19887 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19888 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19889 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19890 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19891 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19893 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19894 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19895 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19896 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19897 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19898 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19899 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19900 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19901 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19902 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19903 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19904 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19906 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19907 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19908 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19909 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19910 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19911 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19912 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19913 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19914 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19915 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19916 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19917 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19919 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19920 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19921 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19922 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19923 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19924 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19925 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19926 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19927 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19928 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19929 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19930 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19932 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19933 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19934 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19935 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19936 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19937 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19938 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19939 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19940 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19941 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19942 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19943 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19945 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19946 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19947 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19948 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19949 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19950 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19951 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19952 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19953 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19954 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19955 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19956 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19958 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19959 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19960 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19961 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19962 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19963 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19964 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19965 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19966 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19967 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19968 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19969 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19971 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19972 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19973 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19974 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19975 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19976 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19977 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19978 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19979 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19980 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19981 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19982 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19984 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19985 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19986 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19987 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19989 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19990 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19991 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19992 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19993 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19994 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19995 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19996 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19997 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19998 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19999 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20000 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20002 /* The implementation of the FIX instruction is broken on some
20003 assemblers, in that it accepts a precision specifier as well as a
20004 rounding specifier, despite the fact that this is meaningless.
20005 To be more compatible, we accept it as well, though of course it
20006 does not set any bits. */
20007 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20008 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20009 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20010 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20011 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20012 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20013 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20014 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20015 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20016 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20017 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20018 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20019 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20021 /* Instructions that were new with the real FPA, call them V2. */
20023 #define ARM_VARIANT & fpu_fpa_ext_v2
20025 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20026 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20027 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20028 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20029 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20030 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20033 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20035 /* Moves and type conversions. */
20036 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20037 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20038 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20039 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20040 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20041 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20042 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20043 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20044 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20045 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20046 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20047 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20048 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20049 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20051 /* Memory operations. */
20052 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20053 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20054 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20055 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20056 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20057 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20058 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20059 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20060 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20061 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20062 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20063 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20064 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20065 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20066 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20067 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20068 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20069 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20071 /* Monadic operations. */
20072 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20073 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20074 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20076 /* Dyadic operations. */
20077 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20078 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20079 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20080 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20081 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20082 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20083 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20084 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20085 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20088 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20089 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20090 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20091 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20093 /* Double precision load/store are still present on single precision
20094 implementations. */
20095 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20096 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20097 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20098 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20099 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20100 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20101 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20102 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20103 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20104 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20107 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20109 /* Moves and type conversions. */
20110 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20111 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20112 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20113 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20114 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20115 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20116 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20117 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20118 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20119 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20120 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20121 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20122 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20124 /* Monadic operations. */
20125 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20126 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20127 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20129 /* Dyadic operations. */
20130 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20131 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20132 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20133 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20134 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20135 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20136 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20137 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20138 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20141 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20142 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20143 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20144 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20147 #define ARM_VARIANT & fpu_vfp_ext_v2
20149 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20150 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20151 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20152 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20154 /* Instructions which may belong to either the Neon or VFP instruction sets.
20155 Individual encoder functions perform additional architecture checks. */
20157 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20158 #undef THUMB_VARIANT
20159 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20161 /* These mnemonics are unique to VFP. */
20162 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20163 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20164 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20165 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20166 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20167 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20168 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20169 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20170 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20171 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20173 /* Mnemonics shared by Neon and VFP. */
20174 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20175 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20176 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20178 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20179 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20181 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20182 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20184 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20185 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20186 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20187 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20188 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20189 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20190 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20191 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20193 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20194 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20195 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20196 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20199 /* NOTE: All VMOV encoding is special-cased! */
20200 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20201 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20204 #define ARM_VARIANT & arm_ext_fp16
20205 #undef THUMB_VARIANT
20206 #define THUMB_VARIANT & arm_ext_fp16
20207 /* New instructions added from v8.2, allowing the extraction and insertion of
20208 the upper 16 bits of a 32-bit vector register. */
20209 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20210 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20212 #undef THUMB_VARIANT
20213 #define THUMB_VARIANT & fpu_neon_ext_v1
20215 #define ARM_VARIANT & fpu_neon_ext_v1
20217 /* Data processing with three registers of the same length. */
20218 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20219 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20220 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20221 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20222 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20223 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20224 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20225 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20226 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20227 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20228 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20229 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20230 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20231 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20232 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20233 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20234 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20235 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20236 /* If not immediate, fall back to neon_dyadic_i64_su.
20237 shl_imm should accept I8 I16 I32 I64,
20238 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20239 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20240 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20241 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20242 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20243 /* Logic ops, types optional & ignored. */
20244 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20245 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20246 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20247 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20248 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20249 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20250 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20251 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20252 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20253 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20254 /* Bitfield ops, untyped. */
20255 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20256 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20257 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20258 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20259 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20260 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20261 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20262 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20263 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20264 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20265 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20266 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20267 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20268 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20269 back to neon_dyadic_if_su. */
20270 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20271 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20272 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20273 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20274 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20275 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20276 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20277 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20278 /* Comparison. Type I8 I16 I32 F32. */
20279 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20280 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20281 /* As above, D registers only. */
20282 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20283 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20284 /* Int and float variants, signedness unimportant. */
20285 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20286 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20287 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20288 /* Add/sub take types I8 I16 I32 I64 F32. */
20289 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20290 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20291 /* vtst takes sizes 8, 16, 32. */
20292 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20293 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20294 /* VMUL takes I8 I16 I32 F32 P8. */
20295 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20296 /* VQD{R}MULH takes S16 S32. */
20297 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20298 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20299 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20300 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20301 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20302 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20303 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20304 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20305 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20306 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20307 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20308 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20309 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20310 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20311 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20312 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20313 /* ARM v8.1 extension. */
20314 nUF(vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20315 nUF(vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20316 nUF(vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20317 nUF(vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20319 /* Two address, int/float. Types S8 S16 S32 F32. */
20320 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20321 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20323 /* Data processing with two registers and a shift amount. */
20324 /* Right shifts, and variants with rounding.
20325 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20326 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20327 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20328 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20329 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20330 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20331 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20332 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20333 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20334 /* Shift and insert. Sizes accepted 8 16 32 64. */
20335 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20336 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20337 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20338 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20339 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20340 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20341 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20342 /* Right shift immediate, saturating & narrowing, with rounding variants.
20343 Types accepted S16 S32 S64 U16 U32 U64. */
20344 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20345 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20346 /* As above, unsigned. Types accepted S16 S32 S64. */
20347 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20348 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20349 /* Right shift narrowing. Types accepted I16 I32 I64. */
20350 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20351 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20352 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20353 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20354 /* CVT with optional immediate for fixed-point variant. */
20355 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20357 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20358 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20360 /* Data processing, three registers of different lengths. */
20361 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20362 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20363 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20364 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20365 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20366 /* If not scalar, fall back to neon_dyadic_long.
20367 Vector types as above, scalar types S16 S32 U16 U32. */
20368 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20369 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20370 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20371 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20372 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20373 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20374 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20375 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20376 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20377 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20378 /* Saturating doubling multiplies. Types S16 S32. */
20379 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20380 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20381 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20382 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20383 S16 S32 U16 U32. */
20384 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20386 /* Extract. Size 8. */
20387 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20388 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20390 /* Two registers, miscellaneous. */
20391 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20392 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20393 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20394 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20395 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20396 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20397 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20398 /* Vector replicate. Sizes 8 16 32. */
20399 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20400 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20401 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20402 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20403 /* VMOVN. Types I16 I32 I64. */
20404 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20405 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20406 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20407 /* VQMOVUN. Types S16 S32 S64. */
20408 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20409 /* VZIP / VUZP. Sizes 8 16 32. */
20410 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20411 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20412 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20413 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20414 /* VQABS / VQNEG. Types S8 S16 S32. */
20415 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20416 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20417 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20418 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20419 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20420 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20421 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20422 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20423 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20424 /* Reciprocal estimates. Types U32 F32. */
20425 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20426 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20427 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20428 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20429 /* VCLS. Types S8 S16 S32. */
20430 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20431 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20432 /* VCLZ. Types I8 I16 I32. */
20433 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20434 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20435 /* VCNT. Size 8. */
20436 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20437 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20438 /* Two address, untyped. */
20439 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20440 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20441 /* VTRN. Sizes 8 16 32. */
20442 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20443 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20445 /* Table lookup. Size 8. */
20446 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20447 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20449 #undef THUMB_VARIANT
20450 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20452 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20454 /* Neon element/structure load/store. */
20455 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20456 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20457 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20458 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20459 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20460 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20461 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20462 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20464 #undef THUMB_VARIANT
20465 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20467 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20468 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20469 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20470 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20471 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20472 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20473 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20474 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20475 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20476 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20478 #undef THUMB_VARIANT
20479 #define THUMB_VARIANT & fpu_vfp_ext_v3
20481 #define ARM_VARIANT & fpu_vfp_ext_v3
20483 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20484 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20485 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20486 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20487 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20488 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20489 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20490 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20491 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20494 #define ARM_VARIANT & fpu_vfp_ext_fma
20495 #undef THUMB_VARIANT
20496 #define THUMB_VARIANT & fpu_vfp_ext_fma
20497 /* Mnemonics shared by Neon and VFP. These are included in the
20498 VFP FMA variant; NEON and VFP FMA always includes the NEON
20499 FMA instructions. */
20500 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20501 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20502 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20503 the v form should always be used. */
20504 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20505 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20506 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20507 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20508 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20509 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20511 #undef THUMB_VARIANT
20513 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20515 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20516 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20517 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20518 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20519 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20520 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20521 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20522 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20525 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20527 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20528 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20529 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20530 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20531 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20532 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20533 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20534 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20535 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20536 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20537 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20538 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20539 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20540 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20541 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20542 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20543 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20544 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20545 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20546 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20547 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20548 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20549 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20550 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20551 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20552 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20553 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20554 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20555 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20556 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20557 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20558 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20559 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20560 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20561 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20562 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20563 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20564 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20565 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20566 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20567 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20568 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20569 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20570 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20571 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20572 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20573 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20574 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20575 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20576 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20577 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20578 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20579 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20580 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20581 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20582 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20583 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20584 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20585 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20586 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20587 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20588 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20589 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20590 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20591 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20592 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20593 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20594 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20595 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20596 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20597 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20598 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20599 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20600 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20601 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20602 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20603 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20604 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20605 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20606 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20607 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20608 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20609 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20610 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20611 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20612 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20613 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20614 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20615 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20616 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20617 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20618 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20619 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20620 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20621 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20622 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20623 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20624 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20625 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20626 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20627 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20628 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20629 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20630 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20631 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20632 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20633 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20634 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20635 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20636 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20637 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20638 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20639 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20640 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20641 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20642 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20643 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20644 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20645 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20646 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20647 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20648 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20649 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20650 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20651 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20652 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20653 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20654 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20655 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20656 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20657 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20658 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20659 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20660 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20661 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20662 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20663 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20664 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20665 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20666 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20667 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20668 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20669 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20670 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20671 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20672 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20673 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20674 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20675 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20676 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20677 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20678 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20679 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20680 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20681 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20682 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20683 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20684 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20685 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20686 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20687 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20688 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20691 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20693 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20694 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20695 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20696 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20697 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20698 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20699 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20700 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20701 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20702 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20703 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20704 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20705 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20706 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20707 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20708 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20709 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20710 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20711 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20712 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20713 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20714 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20715 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20716 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20717 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20718 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20719 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20720 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20721 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20722 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20723 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20724 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20725 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20726 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20727 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20728 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20729 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20730 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20731 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20732 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20733 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20734 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20735 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20736 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20737 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20738 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20739 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20740 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20741 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20742 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20743 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20744 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20745 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20746 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20747 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20748 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20749 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20752 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20754 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20755 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20756 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20757 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20758 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20759 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20760 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20761 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20762 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20763 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20764 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20765 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20766 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20767 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20768 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20769 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20770 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20771 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20772 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20773 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20774 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20775 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20776 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20777 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20778 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20779 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20780 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20781 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20782 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20783 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20784 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20785 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20786 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20787 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20788 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20789 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20790 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20791 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20792 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20793 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20794 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20795 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20796 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20797 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20798 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20799 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20800 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20801 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20802 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20803 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20804 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20805 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20806 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20807 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20808 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20809 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20810 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20811 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20812 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20813 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20814 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20815 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20816 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20817 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20818 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20819 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20820 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20821 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20822 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20823 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20824 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20825 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20826 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20827 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20828 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20829 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20832 #define ARM_VARIANT NULL
20833 #undef THUMB_VARIANT
20834 #define THUMB_VARIANT & arm_ext_v8m
20835 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
20836 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
20839 #undef THUMB_VARIANT
20865 /* MD interface: bits in the object file. */
20867 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20868 for use in the a.out file, and stores them in the array pointed to by buf.
20869 This knows about the endian-ness of the target machine and does
20870 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20871 2 (short) and 4 (long) Floating numbers are put out as a series of
20872 LITTLENUMS (shorts, here at least). */
20875 md_number_to_chars (char * buf
, valueT val
, int n
)
20877 if (target_big_endian
)
20878 number_to_chars_bigendian (buf
, val
, n
);
20880 number_to_chars_littleendian (buf
, val
, n
);
20884 md_chars_to_number (char * buf
, int n
)
20887 unsigned char * where
= (unsigned char *) buf
;
20889 if (target_big_endian
)
20894 result
|= (*where
++ & 255);
20902 result
|= (where
[n
] & 255);
20909 /* MD interface: Sections. */
20911 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20912 that an rs_machine_dependent frag may reach. */
20915 arm_frag_max_var (fragS
*fragp
)
20917 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20918 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20920 Note that we generate relaxable instructions even for cases that don't
20921 really need it, like an immediate that's a trivial constant. So we're
20922 overestimating the instruction size for some of those cases. Rather
20923 than putting more intelligence here, it would probably be better to
20924 avoid generating a relaxation frag in the first place when it can be
20925 determined up front that a short instruction will suffice. */
20927 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20931 /* Estimate the size of a frag before relaxing. Assume everything fits in
20935 md_estimate_size_before_relax (fragS
* fragp
,
20936 segT segtype ATTRIBUTE_UNUSED
)
20942 /* Convert a machine dependent frag. */
20945 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20947 unsigned long insn
;
20948 unsigned long old_op
;
20956 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20958 old_op
= bfd_get_16(abfd
, buf
);
20959 if (fragp
->fr_symbol
)
20961 exp
.X_op
= O_symbol
;
20962 exp
.X_add_symbol
= fragp
->fr_symbol
;
20966 exp
.X_op
= O_constant
;
20968 exp
.X_add_number
= fragp
->fr_offset
;
20969 opcode
= fragp
->fr_subtype
;
20972 case T_MNEM_ldr_pc
:
20973 case T_MNEM_ldr_pc2
:
20974 case T_MNEM_ldr_sp
:
20975 case T_MNEM_str_sp
:
20982 if (fragp
->fr_var
== 4)
20984 insn
= THUMB_OP32 (opcode
);
20985 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
20987 insn
|= (old_op
& 0x700) << 4;
20991 insn
|= (old_op
& 7) << 12;
20992 insn
|= (old_op
& 0x38) << 13;
20994 insn
|= 0x00000c00;
20995 put_thumb32_insn (buf
, insn
);
20996 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21000 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21002 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21005 if (fragp
->fr_var
== 4)
21007 insn
= THUMB_OP32 (opcode
);
21008 insn
|= (old_op
& 0xf0) << 4;
21009 put_thumb32_insn (buf
, insn
);
21010 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21014 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21015 exp
.X_add_number
-= 4;
21023 if (fragp
->fr_var
== 4)
21025 int r0off
= (opcode
== T_MNEM_mov
21026 || opcode
== T_MNEM_movs
) ? 0 : 8;
21027 insn
= THUMB_OP32 (opcode
);
21028 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21029 insn
|= (old_op
& 0x700) << r0off
;
21030 put_thumb32_insn (buf
, insn
);
21031 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21035 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21040 if (fragp
->fr_var
== 4)
21042 insn
= THUMB_OP32(opcode
);
21043 put_thumb32_insn (buf
, insn
);
21044 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21047 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21051 if (fragp
->fr_var
== 4)
21053 insn
= THUMB_OP32(opcode
);
21054 insn
|= (old_op
& 0xf00) << 14;
21055 put_thumb32_insn (buf
, insn
);
21056 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21059 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21062 case T_MNEM_add_sp
:
21063 case T_MNEM_add_pc
:
21064 case T_MNEM_inc_sp
:
21065 case T_MNEM_dec_sp
:
21066 if (fragp
->fr_var
== 4)
21068 /* ??? Choose between add and addw. */
21069 insn
= THUMB_OP32 (opcode
);
21070 insn
|= (old_op
& 0xf0) << 4;
21071 put_thumb32_insn (buf
, insn
);
21072 if (opcode
== T_MNEM_add_pc
)
21073 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21075 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21078 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21086 if (fragp
->fr_var
== 4)
21088 insn
= THUMB_OP32 (opcode
);
21089 insn
|= (old_op
& 0xf0) << 4;
21090 insn
|= (old_op
& 0xf) << 16;
21091 put_thumb32_insn (buf
, insn
);
21092 if (insn
& (1 << 20))
21093 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21095 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21098 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21104 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21105 (enum bfd_reloc_code_real
) reloc_type
);
21106 fixp
->fx_file
= fragp
->fr_file
;
21107 fixp
->fx_line
= fragp
->fr_line
;
21108 fragp
->fr_fix
+= fragp
->fr_var
;
21110 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21111 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21112 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21113 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21116 /* Return the size of a relaxable immediate operand instruction.
21117 SHIFT and SIZE specify the form of the allowable immediate. */
21119 relax_immediate (fragS
*fragp
, int size
, int shift
)
21125 /* ??? Should be able to do better than this. */
21126 if (fragp
->fr_symbol
)
21129 low
= (1 << shift
) - 1;
21130 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21131 offset
= fragp
->fr_offset
;
21132 /* Force misaligned offsets to 32-bit variant. */
21135 if (offset
& ~mask
)
21140 /* Get the address of a symbol during relaxation. */
21142 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21148 sym
= fragp
->fr_symbol
;
21149 sym_frag
= symbol_get_frag (sym
);
21150 know (S_GET_SEGMENT (sym
) != absolute_section
21151 || sym_frag
== &zero_address_frag
);
21152 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21154 /* If frag has yet to be reached on this pass, assume it will
21155 move by STRETCH just as we did. If this is not so, it will
21156 be because some frag between grows, and that will force
21160 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21164 /* Adjust stretch for any alignment frag. Note that if have
21165 been expanding the earlier code, the symbol may be
21166 defined in what appears to be an earlier frag. FIXME:
21167 This doesn't handle the fr_subtype field, which specifies
21168 a maximum number of bytes to skip when doing an
21170 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21172 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21175 stretch
= - ((- stretch
)
21176 & ~ ((1 << (int) f
->fr_offset
) - 1));
21178 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21190 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21193 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21198 /* Assume worst case for symbols not known to be in the same section. */
21199 if (fragp
->fr_symbol
== NULL
21200 || !S_IS_DEFINED (fragp
->fr_symbol
)
21201 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21202 || S_IS_WEAK (fragp
->fr_symbol
))
21205 val
= relaxed_symbol_addr (fragp
, stretch
);
21206 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21207 addr
= (addr
+ 4) & ~3;
21208 /* Force misaligned targets to 32-bit variant. */
21212 if (val
< 0 || val
> 1020)
21217 /* Return the size of a relaxable add/sub immediate instruction. */
21219 relax_addsub (fragS
*fragp
, asection
*sec
)
21224 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21225 op
= bfd_get_16(sec
->owner
, buf
);
21226 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21227 return relax_immediate (fragp
, 8, 0);
21229 return relax_immediate (fragp
, 3, 0);
21232 /* Return TRUE iff the definition of symbol S could be pre-empted
21233 (overridden) at link or load time. */
21235 symbol_preemptible (symbolS
*s
)
21237 /* Weak symbols can always be pre-empted. */
21241 /* Non-global symbols cannot be pre-empted. */
21242 if (! S_IS_EXTERNAL (s
))
21246 /* In ELF, a global symbol can be marked protected, or private. In that
21247 case it can't be pre-empted (other definitions in the same link unit
21248 would violate the ODR). */
21249 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21253 /* Other global symbols might be pre-empted. */
21257 /* Return the size of a relaxable branch instruction. BITS is the
21258 size of the offset field in the narrow instruction. */
21261 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21267 /* Assume worst case for symbols not known to be in the same section. */
21268 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21269 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21270 || S_IS_WEAK (fragp
->fr_symbol
))
21274 /* A branch to a function in ARM state will require interworking. */
21275 if (S_IS_DEFINED (fragp
->fr_symbol
)
21276 && ARM_IS_FUNC (fragp
->fr_symbol
))
21280 if (symbol_preemptible (fragp
->fr_symbol
))
21283 val
= relaxed_symbol_addr (fragp
, stretch
);
21284 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21287 /* Offset is a signed value *2 */
21289 if (val
>= limit
|| val
< -limit
)
21295 /* Relax a machine dependent frag. This returns the amount by which
21296 the current size of the frag should change. */
21299 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21304 oldsize
= fragp
->fr_var
;
21305 switch (fragp
->fr_subtype
)
21307 case T_MNEM_ldr_pc2
:
21308 newsize
= relax_adr (fragp
, sec
, stretch
);
21310 case T_MNEM_ldr_pc
:
21311 case T_MNEM_ldr_sp
:
21312 case T_MNEM_str_sp
:
21313 newsize
= relax_immediate (fragp
, 8, 2);
21317 newsize
= relax_immediate (fragp
, 5, 2);
21321 newsize
= relax_immediate (fragp
, 5, 1);
21325 newsize
= relax_immediate (fragp
, 5, 0);
21328 newsize
= relax_adr (fragp
, sec
, stretch
);
21334 newsize
= relax_immediate (fragp
, 8, 0);
21337 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21340 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21342 case T_MNEM_add_sp
:
21343 case T_MNEM_add_pc
:
21344 newsize
= relax_immediate (fragp
, 8, 2);
21346 case T_MNEM_inc_sp
:
21347 case T_MNEM_dec_sp
:
21348 newsize
= relax_immediate (fragp
, 7, 2);
21354 newsize
= relax_addsub (fragp
, sec
);
21360 fragp
->fr_var
= newsize
;
21361 /* Freeze wide instructions that are at or before the same location as
21362 in the previous pass. This avoids infinite loops.
21363 Don't freeze them unconditionally because targets may be artificially
21364 misaligned by the expansion of preceding frags. */
21365 if (stretch
<= 0 && newsize
> 2)
21367 md_convert_frag (sec
->owner
, sec
, fragp
);
21371 return newsize
- oldsize
;
21374 /* Round up a section size to the appropriate boundary. */
21377 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21380 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21381 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21383 /* For a.out, force the section size to be aligned. If we don't do
21384 this, BFD will align it for us, but it will not write out the
21385 final bytes of the section. This may be a bug in BFD, but it is
21386 easier to fix it here since that is how the other a.out targets
21390 align
= bfd_get_section_alignment (stdoutput
, segment
);
21391 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21398 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21399 of an rs_align_code fragment. */
21402 arm_handle_align (fragS
* fragP
)
21404 static char const arm_noop
[2][2][4] =
21407 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21408 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21411 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21412 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21415 static char const thumb_noop
[2][2][2] =
21418 {0xc0, 0x46}, /* LE */
21419 {0x46, 0xc0}, /* BE */
21422 {0x00, 0xbf}, /* LE */
21423 {0xbf, 0x00} /* BE */
21426 static char const wide_thumb_noop
[2][4] =
21427 { /* Wide Thumb-2 */
21428 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21429 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21432 unsigned bytes
, fix
, noop_size
;
21435 const char *narrow_noop
= NULL
;
21440 if (fragP
->fr_type
!= rs_align_code
)
21443 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21444 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21447 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21448 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21450 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21452 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21454 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21455 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21457 narrow_noop
= thumb_noop
[1][target_big_endian
];
21458 noop
= wide_thumb_noop
[target_big_endian
];
21461 noop
= thumb_noop
[0][target_big_endian
];
21469 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21470 ? selected_cpu
: arm_arch_none
,
21472 [target_big_endian
];
21479 fragP
->fr_var
= noop_size
;
21481 if (bytes
& (noop_size
- 1))
21483 fix
= bytes
& (noop_size
- 1);
21485 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21487 memset (p
, 0, fix
);
21494 if (bytes
& noop_size
)
21496 /* Insert a narrow noop. */
21497 memcpy (p
, narrow_noop
, noop_size
);
21499 bytes
-= noop_size
;
21503 /* Use wide noops for the remainder */
21507 while (bytes
>= noop_size
)
21509 memcpy (p
, noop
, noop_size
);
21511 bytes
-= noop_size
;
21515 fragP
->fr_fix
+= fix
;
21518 /* Called from md_do_align. Used to create an alignment
21519 frag in a code section. */
21522 arm_frag_align_code (int n
, int max
)
21526 /* We assume that there will never be a requirement
21527 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21528 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21533 _("alignments greater than %d bytes not supported in .text sections."),
21534 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21535 as_fatal ("%s", err_msg
);
21538 p
= frag_var (rs_align_code
,
21539 MAX_MEM_FOR_RS_ALIGN_CODE
,
21541 (relax_substateT
) max
,
21548 /* Perform target specific initialisation of a frag.
21549 Note - despite the name this initialisation is not done when the frag
21550 is created, but only when its type is assigned. A frag can be created
21551 and used a long time before its type is set, so beware of assuming that
21552 this initialisationis performed first. */
21556 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21558 /* Record whether this frag is in an ARM or a THUMB area. */
21559 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21562 #else /* OBJ_ELF is defined. */
21564 arm_init_frag (fragS
* fragP
, int max_chars
)
21566 int frag_thumb_mode
;
21568 /* If the current ARM vs THUMB mode has not already
21569 been recorded into this frag then do so now. */
21570 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21571 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21573 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21575 /* Record a mapping symbol for alignment frags. We will delete this
21576 later if the alignment ends up empty. */
21577 switch (fragP
->fr_type
)
21580 case rs_align_test
:
21582 mapping_state_2 (MAP_DATA
, max_chars
);
21584 case rs_align_code
:
21585 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21592 /* When we change sections we need to issue a new mapping symbol. */
21595 arm_elf_change_section (void)
21597 /* Link an unlinked unwind index table section to the .text section. */
21598 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21599 && elf_linked_to_section (now_seg
) == NULL
)
21600 elf_linked_to_section (now_seg
) = text_section
;
21604 arm_elf_section_type (const char * str
, size_t len
)
21606 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21607 return SHT_ARM_EXIDX
;
21612 /* Code to deal with unwinding tables. */
21614 static void add_unwind_adjustsp (offsetT
);
21616 /* Generate any deferred unwind frame offset. */
21619 flush_pending_unwind (void)
21623 offset
= unwind
.pending_offset
;
21624 unwind
.pending_offset
= 0;
21626 add_unwind_adjustsp (offset
);
21629 /* Add an opcode to this list for this function. Two-byte opcodes should
21630 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21634 add_unwind_opcode (valueT op
, int length
)
21636 /* Add any deferred stack adjustment. */
21637 if (unwind
.pending_offset
)
21638 flush_pending_unwind ();
21640 unwind
.sp_restored
= 0;
21642 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21644 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21645 if (unwind
.opcodes
)
21646 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21647 unwind
.opcode_alloc
);
21649 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
21654 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21656 unwind
.opcode_count
++;
21660 /* Add unwind opcodes to adjust the stack pointer. */
21663 add_unwind_adjustsp (offsetT offset
)
21667 if (offset
> 0x200)
21669 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21674 /* Long form: 0xb2, uleb128. */
21675 /* This might not fit in a word so add the individual bytes,
21676 remembering the list is built in reverse order. */
21677 o
= (valueT
) ((offset
- 0x204) >> 2);
21679 add_unwind_opcode (0, 1);
21681 /* Calculate the uleb128 encoding of the offset. */
21685 bytes
[n
] = o
& 0x7f;
21691 /* Add the insn. */
21693 add_unwind_opcode (bytes
[n
- 1], 1);
21694 add_unwind_opcode (0xb2, 1);
21696 else if (offset
> 0x100)
21698 /* Two short opcodes. */
21699 add_unwind_opcode (0x3f, 1);
21700 op
= (offset
- 0x104) >> 2;
21701 add_unwind_opcode (op
, 1);
21703 else if (offset
> 0)
21705 /* Short opcode. */
21706 op
= (offset
- 4) >> 2;
21707 add_unwind_opcode (op
, 1);
21709 else if (offset
< 0)
21712 while (offset
> 0x100)
21714 add_unwind_opcode (0x7f, 1);
21717 op
= ((offset
- 4) >> 2) | 0x40;
21718 add_unwind_opcode (op
, 1);
21722 /* Finish the list of unwind opcodes for this function. */
21724 finish_unwind_opcodes (void)
21728 if (unwind
.fp_used
)
21730 /* Adjust sp as necessary. */
21731 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21732 flush_pending_unwind ();
21734 /* After restoring sp from the frame pointer. */
21735 op
= 0x90 | unwind
.fp_reg
;
21736 add_unwind_opcode (op
, 1);
21739 flush_pending_unwind ();
21743 /* Start an exception table entry. If idx is nonzero this is an index table
21747 start_unwind_section (const segT text_seg
, int idx
)
21749 const char * text_name
;
21750 const char * prefix
;
21751 const char * prefix_once
;
21752 const char * group_name
;
21756 size_t sec_name_len
;
21763 prefix
= ELF_STRING_ARM_unwind
;
21764 prefix_once
= ELF_STRING_ARM_unwind_once
;
21765 type
= SHT_ARM_EXIDX
;
21769 prefix
= ELF_STRING_ARM_unwind_info
;
21770 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21771 type
= SHT_PROGBITS
;
21774 text_name
= segment_name (text_seg
);
21775 if (streq (text_name
, ".text"))
21778 if (strncmp (text_name
, ".gnu.linkonce.t.",
21779 strlen (".gnu.linkonce.t.")) == 0)
21781 prefix
= prefix_once
;
21782 text_name
+= strlen (".gnu.linkonce.t.");
21785 prefix_len
= strlen (prefix
);
21786 text_len
= strlen (text_name
);
21787 sec_name_len
= prefix_len
+ text_len
;
21788 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21789 memcpy (sec_name
, prefix
, prefix_len
);
21790 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21791 sec_name
[prefix_len
+ text_len
] = '\0';
21797 /* Handle COMDAT group. */
21798 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21800 group_name
= elf_group_name (text_seg
);
21801 if (group_name
== NULL
)
21803 as_bad (_("Group section `%s' has no group signature"),
21804 segment_name (text_seg
));
21805 ignore_rest_of_line ();
21808 flags
|= SHF_GROUP
;
21812 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21814 /* Set the section link for index tables. */
21816 elf_linked_to_section (now_seg
) = text_seg
;
21820 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21821 personality routine data. Returns zero, or the index table value for
21822 an inline entry. */
21825 create_unwind_entry (int have_data
)
21830 /* The current word of data. */
21832 /* The number of bytes left in this word. */
21835 finish_unwind_opcodes ();
21837 /* Remember the current text section. */
21838 unwind
.saved_seg
= now_seg
;
21839 unwind
.saved_subseg
= now_subseg
;
21841 start_unwind_section (now_seg
, 0);
21843 if (unwind
.personality_routine
== NULL
)
21845 if (unwind
.personality_index
== -2)
21848 as_bad (_("handlerdata in cantunwind frame"));
21849 return 1; /* EXIDX_CANTUNWIND. */
21852 /* Use a default personality routine if none is specified. */
21853 if (unwind
.personality_index
== -1)
21855 if (unwind
.opcode_count
> 3)
21856 unwind
.personality_index
= 1;
21858 unwind
.personality_index
= 0;
21861 /* Space for the personality routine entry. */
21862 if (unwind
.personality_index
== 0)
21864 if (unwind
.opcode_count
> 3)
21865 as_bad (_("too many unwind opcodes for personality routine 0"));
21869 /* All the data is inline in the index table. */
21872 while (unwind
.opcode_count
> 0)
21874 unwind
.opcode_count
--;
21875 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21879 /* Pad with "finish" opcodes. */
21881 data
= (data
<< 8) | 0xb0;
21888 /* We get two opcodes "free" in the first word. */
21889 size
= unwind
.opcode_count
- 2;
21893 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21894 if (unwind
.personality_index
!= -1)
21896 as_bad (_("attempt to recreate an unwind entry"));
21900 /* An extra byte is required for the opcode count. */
21901 size
= unwind
.opcode_count
+ 1;
21904 size
= (size
+ 3) >> 2;
21906 as_bad (_("too many unwind opcodes"));
21908 frag_align (2, 0, 0);
21909 record_alignment (now_seg
, 2);
21910 unwind
.table_entry
= expr_build_dot ();
21912 /* Allocate the table entry. */
21913 ptr
= frag_more ((size
<< 2) + 4);
21914 /* PR 13449: Zero the table entries in case some of them are not used. */
21915 memset (ptr
, 0, (size
<< 2) + 4);
21916 where
= frag_now_fix () - ((size
<< 2) + 4);
21918 switch (unwind
.personality_index
)
21921 /* ??? Should this be a PLT generating relocation? */
21922 /* Custom personality routine. */
21923 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21924 BFD_RELOC_ARM_PREL31
);
21929 /* Set the first byte to the number of additional words. */
21930 data
= size
> 0 ? size
- 1 : 0;
21934 /* ABI defined personality routines. */
21936 /* Three opcodes bytes are packed into the first word. */
21943 /* The size and first two opcode bytes go in the first word. */
21944 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21949 /* Should never happen. */
21953 /* Pack the opcodes into words (MSB first), reversing the list at the same
21955 while (unwind
.opcode_count
> 0)
21959 md_number_to_chars (ptr
, data
, 4);
21964 unwind
.opcode_count
--;
21966 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21969 /* Finish off the last word. */
21972 /* Pad with "finish" opcodes. */
21974 data
= (data
<< 8) | 0xb0;
21976 md_number_to_chars (ptr
, data
, 4);
21981 /* Add an empty descriptor if there is no user-specified data. */
21982 ptr
= frag_more (4);
21983 md_number_to_chars (ptr
, 0, 4);
21990 /* Initialize the DWARF-2 unwind information for this procedure. */
21993 tc_arm_frame_initial_instructions (void)
21995 cfi_add_CFA_def_cfa (REG_SP
, 0);
21997 #endif /* OBJ_ELF */
21999 /* Convert REGNAME to a DWARF-2 register number. */
22002 tc_arm_regname_to_dw2regnum (char *regname
)
22004 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22008 /* PR 16694: Allow VFP registers as well. */
22009 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22013 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (secrel) offset to SYMBOL, as
   required by PE DWARF-2 debug info.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
22033 /* MD interface: Symbol and relocation handling. */
22035 /* Return the address within the segment that a PC-relative fixup is
22036 relative to. For ARM, PC-relative fixups applied to instructions
22037 are generally relative to the location of the fixup plus 8 bytes.
22038 Thumb branches are offset by 4, and Thumb loads relative to PC
22039 require special handling. */
22042 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22044 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22046 /* If this is pc-relative and we are going to emit a relocation
22047 then we just want to put out any pipeline compensation that the linker
22048 will need. Otherwise we want to use the calculated base.
22049 For WinCE we skip the bias for externals as well, since this
22050 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22052 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22053 || (arm_force_relocation (fixP
)
22055 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22061 switch (fixP
->fx_r_type
)
22063 /* PC relative addressing on the Thumb is slightly odd as the
22064 bottom two bits of the PC are forced to zero for the
22065 calculation. This happens *after* application of the
22066 pipeline offset. However, Thumb adrl already adjusts for
22067 this, so we need not do it again. */
22068 case BFD_RELOC_ARM_THUMB_ADD
:
22071 case BFD_RELOC_ARM_THUMB_OFFSET
:
22072 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22073 case BFD_RELOC_ARM_T32_ADD_PC12
:
22074 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22075 return (base
+ 4) & ~3;
22077 /* Thumb branches are simply offset by +4. */
22078 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22079 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22080 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22081 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22082 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22085 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22087 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22088 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22089 && ARM_IS_FUNC (fixP
->fx_addsy
)
22090 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22091 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22094 /* BLX is like branches above, but forces the low two bits of PC to
22096 case BFD_RELOC_THUMB_PCREL_BLX
:
22098 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22099 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22100 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22101 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22102 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22103 return (base
+ 4) & ~3;
22105 /* ARM mode branches are offset by +8. However, the Windows CE
22106 loader expects the relocation not to take this into account. */
22107 case BFD_RELOC_ARM_PCREL_BLX
:
22109 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22110 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22111 && ARM_IS_FUNC (fixP
->fx_addsy
)
22112 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22113 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22116 case BFD_RELOC_ARM_PCREL_CALL
:
22118 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22119 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22120 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22121 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22122 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22125 case BFD_RELOC_ARM_PCREL_BRANCH
:
22126 case BFD_RELOC_ARM_PCREL_JUMP
:
22127 case BFD_RELOC_ARM_PLT32
:
22129 /* When handling fixups immediately, because we have already
22130 discovered the value of a symbol, or the address of the frag involved
22131 we must account for the offset by +8, as the OS loader will never see the reloc.
22132 see fixup_segment() in write.c
22133 The S_IS_EXTERNAL test handles the case of global symbols.
22134 Those need the calculated base, not just the pipe compensation the linker will need. */
22136 && fixP
->fx_addsy
!= NULL
22137 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22138 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22146 /* ARM mode loads relative to PC are also offset by +8. Unlike
22147 branches, the Windows CE loader *does* expect the relocation
22148 to take this into account. */
22149 case BFD_RELOC_ARM_OFFSET_IMM
:
22150 case BFD_RELOC_ARM_OFFSET_IMM8
:
22151 case BFD_RELOC_ARM_HWLITERAL
:
22152 case BFD_RELOC_ARM_LITERAL
:
22153 case BFD_RELOC_ARM_CP_OFF_IMM
:
22157 /* Other PC-relative relocations are un-offset. */
22163 static bfd_boolean flag_warn_syms
= TRUE
;
22166 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22168 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22169 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22170 does mean that the resulting code might be very confusing to the reader.
22171 Also this warning can be triggered if the user omits an operand before
22172 an immediate address, eg:
22176 GAS treats this as an assignment of the value of the symbol foo to a
22177 symbol LDR, and so (without this code) it will not issue any kind of
22178 warning or error message.
22180 Note - ARM instructions are case-insensitive but the strings in the hash
22181 table are all stored in lower case, so we must first ensure that name is
22183 if (flag_warn_syms
&& arm_ops_hsh
)
22185 char * nbuf
= strdup (name
);
22188 for (p
= nbuf
; *p
; p
++)
22190 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22192 static struct hash_control
* already_warned
= NULL
;
22194 if (already_warned
== NULL
)
22195 already_warned
= hash_new ();
22196 /* Only warn about the symbol once. To keep the code
22197 simple we let hash_insert do the lookup for us. */
22198 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22199 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22208 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22209 Otherwise we have no need to default values of symbols. */
22212 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22215 if (name
[0] == '_' && name
[1] == 'G'
22216 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22220 if (symbol_find (name
))
22221 as_bad (_("GOT already in the symbol table"));
22223 GOT_symbol
= symbol_new (name
, undefined_section
,
22224 (valueT
) 0, & zero_address_frag
);
22234 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22235 computed as two separate immediate values, added together. We
22236 already know that this value cannot be computed by just one ARM
22239 static unsigned int
22240 validate_immediate_twopart (unsigned int val
,
22241 unsigned int * highpart
)
22246 for (i
= 0; i
< 32; i
+= 2)
22247 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22253 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22255 else if (a
& 0xff0000)
22257 if (a
& 0xff000000)
22259 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22263 gas_assert (a
& 0xff000000);
22264 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22267 return (a
& 0xff) | (i
<< 7);
22274 validate_offset_imm (unsigned int val
, int hwse
)
22276 if ((hwse
&& val
> 255) || val
> 4095)
22281 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22282 negative immediate constant by altering the instruction. A bit of
22287 by inverting the second operand, and
22290 by negating the second operand. */
22293 negate_data_op (unsigned long * instruction
,
22294 unsigned long value
)
22297 unsigned long negated
, inverted
;
22299 negated
= encode_arm_immediate (-value
);
22300 inverted
= encode_arm_immediate (~value
);
22302 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22305 /* First negates. */
22306 case OPCODE_SUB
: /* ADD <-> SUB */
22307 new_inst
= OPCODE_ADD
;
22312 new_inst
= OPCODE_SUB
;
22316 case OPCODE_CMP
: /* CMP <-> CMN */
22317 new_inst
= OPCODE_CMN
;
22322 new_inst
= OPCODE_CMP
;
22326 /* Now Inverted ops. */
22327 case OPCODE_MOV
: /* MOV <-> MVN */
22328 new_inst
= OPCODE_MVN
;
22333 new_inst
= OPCODE_MOV
;
22337 case OPCODE_AND
: /* AND <-> BIC */
22338 new_inst
= OPCODE_BIC
;
22343 new_inst
= OPCODE_AND
;
22347 case OPCODE_ADC
: /* ADC <-> SBC */
22348 new_inst
= OPCODE_SBC
;
22353 new_inst
= OPCODE_ADC
;
22357 /* We cannot do anything. */
22362 if (value
== (unsigned) FAIL
)
22365 *instruction
&= OPCODE_MASK
;
22366 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22370 /* Like negate_data_op, but for Thumb-2. */
22372 static unsigned int
22373 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22377 unsigned int negated
, inverted
;
22379 negated
= encode_thumb32_immediate (-value
);
22380 inverted
= encode_thumb32_immediate (~value
);
22382 rd
= (*instruction
>> 8) & 0xf;
22383 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22386 /* ADD <-> SUB. Includes CMP <-> CMN. */
22387 case T2_OPCODE_SUB
:
22388 new_inst
= T2_OPCODE_ADD
;
22392 case T2_OPCODE_ADD
:
22393 new_inst
= T2_OPCODE_SUB
;
22397 /* ORR <-> ORN. Includes MOV <-> MVN. */
22398 case T2_OPCODE_ORR
:
22399 new_inst
= T2_OPCODE_ORN
;
22403 case T2_OPCODE_ORN
:
22404 new_inst
= T2_OPCODE_ORR
;
22408 /* AND <-> BIC. TST has no inverted equivalent. */
22409 case T2_OPCODE_AND
:
22410 new_inst
= T2_OPCODE_BIC
;
22417 case T2_OPCODE_BIC
:
22418 new_inst
= T2_OPCODE_AND
;
22423 case T2_OPCODE_ADC
:
22424 new_inst
= T2_OPCODE_SBC
;
22428 case T2_OPCODE_SBC
:
22429 new_inst
= T2_OPCODE_ADC
;
22433 /* We cannot do anything. */
22438 if (value
== (unsigned int)FAIL
)
22441 *instruction
&= T2_OPCODE_MASK
;
22442 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22446 /* Read a 32-bit thumb instruction from buf. */
22447 static unsigned long
22448 get_thumb32_insn (char * buf
)
22450 unsigned long insn
;
22451 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22452 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22458 /* We usually want to set the low bit on the address of thumb function
22459 symbols. In particular .word foo - . should have the low bit set.
22460 Generic code tries to fold the difference of two symbols to
22461 a constant. Prevent this and force a relocation when the first symbols
22462 is a thumb function. */
22465 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22467 if (op
== O_subtract
22468 && l
->X_op
== O_symbol
22469 && r
->X_op
== O_symbol
22470 && THUMB_IS_FUNC (l
->X_add_symbol
))
22472 l
->X_op
= O_subtract
;
22473 l
->X_op_symbol
= r
->X_add_symbol
;
22474 l
->X_add_number
-= r
->X_add_number
;
22478 /* Process as normal. */
22482 /* Encode Thumb2 unconditional branches and calls. The encoding
22483 for the 2 are identical for the immediate values. */
22486 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22488 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22491 addressT S
, I1
, I2
, lo
, hi
;
22493 S
= (value
>> 24) & 0x01;
22494 I1
= (value
>> 23) & 0x01;
22495 I2
= (value
>> 22) & 0x01;
22496 hi
= (value
>> 12) & 0x3ff;
22497 lo
= (value
>> 1) & 0x7ff;
22498 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22499 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22500 newval
|= (S
<< 10) | hi
;
22501 newval2
&= ~T2I1I2MASK
;
22502 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22503 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22504 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22508 md_apply_fix (fixS
* fixP
,
22512 offsetT value
= * valP
;
22514 unsigned int newimm
;
22515 unsigned long temp
;
22517 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22519 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22521 /* Note whether this will delete the relocation. */
22523 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22526 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22527 consistency with the behaviour on 32-bit hosts. Remember value
22529 value
&= 0xffffffff;
22530 value
^= 0x80000000;
22531 value
-= 0x80000000;
22534 fixP
->fx_addnumber
= value
;
22536 /* Same treatment for fixP->fx_offset. */
22537 fixP
->fx_offset
&= 0xffffffff;
22538 fixP
->fx_offset
^= 0x80000000;
22539 fixP
->fx_offset
-= 0x80000000;
22541 switch (fixP
->fx_r_type
)
22543 case BFD_RELOC_NONE
:
22544 /* This will need to go in the object file. */
22548 case BFD_RELOC_ARM_IMMEDIATE
:
22549 /* We claim that this fixup has been processed here,
22550 even if in fact we generate an error because we do
22551 not have a reloc for it, so tc_gen_reloc will reject it. */
22554 if (fixP
->fx_addsy
)
22556 const char *msg
= 0;
22558 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22559 msg
= _("undefined symbol %s used as an immediate value");
22560 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22561 msg
= _("symbol %s is in a different section");
22562 else if (S_IS_WEAK (fixP
->fx_addsy
))
22563 msg
= _("symbol %s is weak and may be overridden later");
22567 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22568 msg
, S_GET_NAME (fixP
->fx_addsy
));
22573 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22575 /* If the offset is negative, we should use encoding A2 for ADR. */
22576 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22577 newimm
= negate_data_op (&temp
, value
);
22580 newimm
= encode_arm_immediate (value
);
22582 /* If the instruction will fail, see if we can fix things up by
22583 changing the opcode. */
22584 if (newimm
== (unsigned int) FAIL
)
22585 newimm
= negate_data_op (&temp
, value
);
22588 if (newimm
== (unsigned int) FAIL
)
22590 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22591 _("invalid constant (%lx) after fixup"),
22592 (unsigned long) value
);
22596 newimm
|= (temp
& 0xfffff000);
22597 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22600 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22602 unsigned int highpart
= 0;
22603 unsigned int newinsn
= 0xe1a00000; /* nop. */
22605 if (fixP
->fx_addsy
)
22607 const char *msg
= 0;
22609 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22610 msg
= _("undefined symbol %s used as an immediate value");
22611 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22612 msg
= _("symbol %s is in a different section");
22613 else if (S_IS_WEAK (fixP
->fx_addsy
))
22614 msg
= _("symbol %s is weak and may be overridden later");
22618 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22619 msg
, S_GET_NAME (fixP
->fx_addsy
));
22624 newimm
= encode_arm_immediate (value
);
22625 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22627 /* If the instruction will fail, see if we can fix things up by
22628 changing the opcode. */
22629 if (newimm
== (unsigned int) FAIL
22630 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22632 /* No ? OK - try using two ADD instructions to generate
22634 newimm
= validate_immediate_twopart (value
, & highpart
);
22636 /* Yes - then make sure that the second instruction is
22638 if (newimm
!= (unsigned int) FAIL
)
22640 /* Still No ? Try using a negated value. */
22641 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22642 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22643 /* Otherwise - give up. */
22646 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22647 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22652 /* Replace the first operand in the 2nd instruction (which
22653 is the PC) with the destination register. We have
22654 already added in the PC in the first instruction and we
22655 do not want to do it again. */
22656 newinsn
&= ~ 0xf0000;
22657 newinsn
|= ((newinsn
& 0x0f000) << 4);
22660 newimm
|= (temp
& 0xfffff000);
22661 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22663 highpart
|= (newinsn
& 0xfffff000);
22664 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22668 case BFD_RELOC_ARM_OFFSET_IMM
:
22669 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22672 case BFD_RELOC_ARM_LITERAL
:
22678 if (validate_offset_imm (value
, 0) == FAIL
)
22680 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22681 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22682 _("invalid literal constant: pool needs to be closer"));
22684 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22685 _("bad immediate value for offset (%ld)"),
22690 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22692 newval
&= 0xfffff000;
22695 newval
&= 0xff7ff000;
22696 newval
|= value
| (sign
? INDEX_UP
: 0);
22698 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22701 case BFD_RELOC_ARM_OFFSET_IMM8
:
22702 case BFD_RELOC_ARM_HWLITERAL
:
22708 if (validate_offset_imm (value
, 1) == FAIL
)
22710 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22711 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22712 _("invalid literal constant: pool needs to be closer"));
22714 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22715 _("bad immediate value for 8-bit offset (%ld)"),
22720 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22722 newval
&= 0xfffff0f0;
22725 newval
&= 0xff7ff0f0;
22726 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22728 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22731 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22732 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22733 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22734 _("bad immediate value for offset (%ld)"), (long) value
);
22737 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22739 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22742 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22743 /* This is a complicated relocation used for all varieties of Thumb32
22744 load/store instruction with immediate offset:
22746 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22747 *4, optional writeback(W)
22748 (doubleword load/store)
22750 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22751 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22752 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22753 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22754 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22756 Uppercase letters indicate bits that are already encoded at
22757 this point. Lowercase letters are our problem. For the
22758 second block of instructions, the secondary opcode nybble
22759 (bits 8..11) is present, and bit 23 is zero, even if this is
22760 a PC-relative operation. */
22761 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22763 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22765 if ((newval
& 0xf0000000) == 0xe0000000)
22767 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22769 newval
|= (1 << 23);
22772 if (value
% 4 != 0)
22774 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22775 _("offset not a multiple of 4"));
22781 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22782 _("offset out of range"));
22787 else if ((newval
& 0x000f0000) == 0x000f0000)
22789 /* PC-relative, 12-bit offset. */
22791 newval
|= (1 << 23);
22796 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22797 _("offset out of range"));
22802 else if ((newval
& 0x00000100) == 0x00000100)
22804 /* Writeback: 8-bit, +/- offset. */
22806 newval
|= (1 << 9);
22811 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22812 _("offset out of range"));
22817 else if ((newval
& 0x00000f00) == 0x00000e00)
22819 /* T-instruction: positive 8-bit offset. */
22820 if (value
< 0 || value
> 0xff)
22822 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22823 _("offset out of range"));
22831 /* Positive 12-bit or negative 8-bit offset. */
22835 newval
|= (1 << 23);
22845 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22846 _("offset out of range"));
22853 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22854 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22857 case BFD_RELOC_ARM_SHIFT_IMM
:
22858 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22859 if (((unsigned long) value
) > 32
22861 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22863 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22864 _("shift expression is too large"));
22869 /* Shifts of zero must be done as lsl. */
22871 else if (value
== 32)
22873 newval
&= 0xfffff07f;
22874 newval
|= (value
& 0x1f) << 7;
22875 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22878 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22879 case BFD_RELOC_ARM_T32_ADD_IMM
:
22880 case BFD_RELOC_ARM_T32_IMM12
:
22881 case BFD_RELOC_ARM_T32_ADD_PC12
:
22882 /* We claim that this fixup has been processed here,
22883 even if in fact we generate an error because we do
22884 not have a reloc for it, so tc_gen_reloc will reject it. */
22888 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22890 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22891 _("undefined symbol %s used as an immediate value"),
22892 S_GET_NAME (fixP
->fx_addsy
));
22896 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22898 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22901 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22902 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22904 newimm
= encode_thumb32_immediate (value
);
22905 if (newimm
== (unsigned int) FAIL
)
22906 newimm
= thumb32_negate_data_op (&newval
, value
);
22908 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22909 && newimm
== (unsigned int) FAIL
)
22911 /* Turn add/sum into addw/subw. */
22912 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22913 newval
= (newval
& 0xfeffffff) | 0x02000000;
22914 /* No flat 12-bit imm encoding for addsw/subsw. */
22915 if ((newval
& 0x00100000) == 0)
22917 /* 12 bit immediate for addw/subw. */
22921 newval
^= 0x00a00000;
22924 newimm
= (unsigned int) FAIL
;
22930 if (newimm
== (unsigned int)FAIL
)
22932 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22933 _("invalid constant (%lx) after fixup"),
22934 (unsigned long) value
);
22938 newval
|= (newimm
& 0x800) << 15;
22939 newval
|= (newimm
& 0x700) << 4;
22940 newval
|= (newimm
& 0x0ff);
22942 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22943 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22946 case BFD_RELOC_ARM_SMC
:
22947 if (((unsigned long) value
) > 0xffff)
22948 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22949 _("invalid smc expression"));
22950 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22951 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22952 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22955 case BFD_RELOC_ARM_HVC
:
22956 if (((unsigned long) value
) > 0xffff)
22957 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22958 _("invalid hvc expression"));
22959 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22960 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22961 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22964 case BFD_RELOC_ARM_SWI
:
22965 if (fixP
->tc_fix_data
!= 0)
22967 if (((unsigned long) value
) > 0xff)
22968 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22969 _("invalid swi expression"));
22970 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22972 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22976 if (((unsigned long) value
) > 0x00ffffff)
22977 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22978 _("invalid swi expression"));
22979 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22981 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22985 case BFD_RELOC_ARM_MULTI
:
22986 if (((unsigned long) value
) > 0xffff)
22987 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22988 _("invalid expression in load/store multiple"));
22989 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22990 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22994 case BFD_RELOC_ARM_PCREL_CALL
:
22996 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22998 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22999 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23000 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23001 /* Flip the bl to blx. This is a simple flip
23002 bit here because we generate PCREL_CALL for
23003 unconditional bls. */
23005 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23006 newval
= newval
| 0x10000000;
23007 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23013 goto arm_branch_common
;
23015 case BFD_RELOC_ARM_PCREL_JUMP
:
23016 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23018 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23019 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23020 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23022 /* This would map to a bl<cond>, b<cond>,
23023 b<always> to a Thumb function. We
23024 need to force a relocation for this particular
23026 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23030 case BFD_RELOC_ARM_PLT32
:
23032 case BFD_RELOC_ARM_PCREL_BRANCH
:
23034 goto arm_branch_common
;
23036 case BFD_RELOC_ARM_PCREL_BLX
:
23039 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23041 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23042 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23043 && ARM_IS_FUNC (fixP
->fx_addsy
))
23045 /* Flip the blx to a bl and warn. */
23046 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23047 newval
= 0xeb000000;
23048 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23049 _("blx to '%s' an ARM ISA state function changed to bl"),
23051 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23057 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23058 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23062 /* We are going to store value (shifted right by two) in the
23063 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23064 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23065 also be be clear. */
23067 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23068 _("misaligned branch destination"));
23069 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23070 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23073 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23075 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23076 newval
|= (value
>> 2) & 0x00ffffff;
23077 /* Set the H bit on BLX instructions. */
23081 newval
|= 0x01000000;
23083 newval
&= ~0x01000000;
23085 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23089 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23090 /* CBZ can only branch forward. */
23092 /* Attempts to use CBZ to branch to the next instruction
23093 (which, strictly speaking, are prohibited) will be turned into
23096 FIXME: It may be better to remove the instruction completely and
23097 perform relaxation. */
23100 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23101 newval
= 0xbf00; /* NOP encoding T1 */
23102 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23109 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23111 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23112 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23113 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23118 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23119 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23120 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23122 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23124 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23125 newval
|= (value
& 0x1ff) >> 1;
23126 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23130 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23131 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23132 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23134 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23136 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23137 newval
|= (value
& 0xfff) >> 1;
23138 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23142 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23144 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23145 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23146 && ARM_IS_FUNC (fixP
->fx_addsy
)
23147 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23149 /* Force a relocation for a branch 20 bits wide. */
23152 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23153 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23154 _("conditional branch out of range"));
23156 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23159 addressT S
, J1
, J2
, lo
, hi
;
23161 S
= (value
& 0x00100000) >> 20;
23162 J2
= (value
& 0x00080000) >> 19;
23163 J1
= (value
& 0x00040000) >> 18;
23164 hi
= (value
& 0x0003f000) >> 12;
23165 lo
= (value
& 0x00000ffe) >> 1;
23167 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23168 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23169 newval
|= (S
<< 10) | hi
;
23170 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23171 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23172 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23176 case BFD_RELOC_THUMB_PCREL_BLX
:
23177 /* If there is a blx from a thumb state function to
23178 another thumb function flip this to a bl and warn
23182 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23183 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23184 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23186 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23187 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23188 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23190 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23191 newval
= newval
| 0x1000;
23192 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23193 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23198 goto thumb_bl_common
;
23200 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23201 /* A bl from Thumb state ISA to an internal ARM state function
23202 is converted to a blx. */
23204 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23205 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23206 && ARM_IS_FUNC (fixP
->fx_addsy
)
23207 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23209 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23210 newval
= newval
& ~0x1000;
23211 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23212 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23218 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23219 /* For a BLX instruction, make sure that the relocation is rounded up
23220 to a word boundary. This follows the semantics of the instruction
23221 which specifies that bit 1 of the target address will come from bit
23222 1 of the base address. */
23223 value
= (value
+ 3) & ~ 3;
23226 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23227 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23228 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23231 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23233 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23235 else if ((value
& ~0x1ffffff)
23236 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23237 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23238 _("Thumb2 branch out of range"));
23241 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23242 encode_thumb2_b_bl_offset (buf
, value
);
23246 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23247 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23248 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23250 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23251 encode_thumb2_b_bl_offset (buf
, value
);
23256 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23261 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23262 md_number_to_chars (buf
, value
, 2);
23266 case BFD_RELOC_ARM_TLS_CALL
:
23267 case BFD_RELOC_ARM_THM_TLS_CALL
:
23268 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23269 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23270 case BFD_RELOC_ARM_TLS_GOTDESC
:
23271 case BFD_RELOC_ARM_TLS_GD32
:
23272 case BFD_RELOC_ARM_TLS_LE32
:
23273 case BFD_RELOC_ARM_TLS_IE32
:
23274 case BFD_RELOC_ARM_TLS_LDM32
:
23275 case BFD_RELOC_ARM_TLS_LDO32
:
23276 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23279 case BFD_RELOC_ARM_GOT32
:
23280 case BFD_RELOC_ARM_GOTOFF
:
23283 case BFD_RELOC_ARM_GOT_PREL
:
23284 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23285 md_number_to_chars (buf
, value
, 4);
23288 case BFD_RELOC_ARM_TARGET2
:
23289 /* TARGET2 is not partial-inplace, so we need to write the
23290 addend here for REL targets, because it won't be written out
23291 during reloc processing later. */
23292 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23293 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23297 case BFD_RELOC_RVA
:
23299 case BFD_RELOC_ARM_TARGET1
:
23300 case BFD_RELOC_ARM_ROSEGREL32
:
23301 case BFD_RELOC_ARM_SBREL32
:
23302 case BFD_RELOC_32_PCREL
:
23304 case BFD_RELOC_32_SECREL
:
23306 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23308 /* For WinCE we only do this for pcrel fixups. */
23309 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23311 md_number_to_chars (buf
, value
, 4);
23315 case BFD_RELOC_ARM_PREL31
:
23316 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23318 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23319 if ((value
^ (value
>> 1)) & 0x40000000)
23321 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23322 _("rel31 relocation overflow"));
23324 newval
|= value
& 0x7fffffff;
23325 md_number_to_chars (buf
, newval
, 4);
23330 case BFD_RELOC_ARM_CP_OFF_IMM
:
23331 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23332 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23333 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23335 newval
= get_thumb32_insn (buf
);
23336 if ((newval
& 0x0f200f00) == 0x0d000900)
23338 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23339 has permitted values that are multiples of 2, in the range 0
23341 if (value
< -510 || value
> 510 || (value
& 1))
23342 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23343 _("co-processor offset out of range"));
23345 else if (value
< -1023 || value
> 1023 || (value
& 3))
23346 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23347 _("co-processor offset out of range"));
23352 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23353 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23354 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23356 newval
= get_thumb32_insn (buf
);
23358 newval
&= 0xffffff00;
23361 newval
&= 0xff7fff00;
23362 if ((newval
& 0x0f200f00) == 0x0d000900)
23364 /* This is a fp16 vstr/vldr.
23366 It requires the immediate offset in the instruction is shifted
23367 left by 1 to be a half-word offset.
23369 Here, left shift by 1 first, and later right shift by 2
23370 should get the right offset. */
23373 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23375 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23376 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23377 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23379 put_thumb32_insn (buf
, newval
);
23382 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23383 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23384 if (value
< -255 || value
> 255)
23385 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23386 _("co-processor offset out of range"));
23388 goto cp_off_common
;
23390 case BFD_RELOC_ARM_THUMB_OFFSET
:
23391 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23392 /* Exactly what ranges, and where the offset is inserted depends
23393 on the type of instruction, we can establish this from the
23395 switch (newval
>> 12)
23397 case 4: /* PC load. */
23398 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23399 forced to zero for these loads; md_pcrel_from has already
23400 compensated for this. */
23402 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23403 _("invalid offset, target not word aligned (0x%08lX)"),
23404 (((unsigned long) fixP
->fx_frag
->fr_address
23405 + (unsigned long) fixP
->fx_where
) & ~3)
23406 + (unsigned long) value
);
23408 if (value
& ~0x3fc)
23409 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23410 _("invalid offset, value too big (0x%08lX)"),
23413 newval
|= value
>> 2;
23416 case 9: /* SP load/store. */
23417 if (value
& ~0x3fc)
23418 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23419 _("invalid offset, value too big (0x%08lX)"),
23421 newval
|= value
>> 2;
23424 case 6: /* Word load/store. */
23426 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23427 _("invalid offset, value too big (0x%08lX)"),
23429 newval
|= value
<< 4; /* 6 - 2. */
23432 case 7: /* Byte load/store. */
23434 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23435 _("invalid offset, value too big (0x%08lX)"),
23437 newval
|= value
<< 6;
23440 case 8: /* Halfword load/store. */
23442 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23443 _("invalid offset, value too big (0x%08lX)"),
23445 newval
|= value
<< 5; /* 6 - 1. */
23449 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23450 "Unable to process relocation for thumb opcode: %lx",
23451 (unsigned long) newval
);
23454 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23457 case BFD_RELOC_ARM_THUMB_ADD
:
23458 /* This is a complicated relocation, since we use it for all of
23459 the following immediate relocations:
23463 9bit ADD/SUB SP word-aligned
23464 10bit ADD PC/SP word-aligned
23466 The type of instruction being processed is encoded in the
23473 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23475 int rd
= (newval
>> 4) & 0xf;
23476 int rs
= newval
& 0xf;
23477 int subtract
= !!(newval
& 0x8000);
23479 /* Check for HI regs, only very restricted cases allowed:
23480 Adjusting SP, and using PC or SP to get an address. */
23481 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23482 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23483 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23484 _("invalid Hi register with immediate"));
23486 /* If value is negative, choose the opposite instruction. */
23490 subtract
= !subtract
;
23492 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23493 _("immediate value out of range"));
23498 if (value
& ~0x1fc)
23499 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23500 _("invalid immediate for stack address calculation"));
23501 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23502 newval
|= value
>> 2;
23504 else if (rs
== REG_PC
|| rs
== REG_SP
)
23506 /* PR gas/18541. If the addition is for a defined symbol
23507 within range of an ADR instruction then accept it. */
23510 && fixP
->fx_addsy
!= NULL
)
23514 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23515 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23516 || S_IS_WEAK (fixP
->fx_addsy
))
23518 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23519 _("address calculation needs a strongly defined nearby symbol"));
23523 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23525 /* Round up to the next 4-byte boundary. */
23530 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23534 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23535 _("symbol too far away"));
23545 if (subtract
|| value
& ~0x3fc)
23546 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23547 _("invalid immediate for address calculation (value = 0x%08lX)"),
23548 (unsigned long) (subtract
? - value
: value
));
23549 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23551 newval
|= value
>> 2;
23556 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23557 _("immediate value out of range"));
23558 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23559 newval
|= (rd
<< 8) | value
;
23564 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23565 _("immediate value out of range"));
23566 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23567 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23570 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23573 case BFD_RELOC_ARM_THUMB_IMM
:
23574 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23575 if (value
< 0 || value
> 255)
23576 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23577 _("invalid immediate: %ld is out of range"),
23580 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23583 case BFD_RELOC_ARM_THUMB_SHIFT
:
23584 /* 5bit shift value (0..32). LSL cannot take 32. */
23585 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23586 temp
= newval
& 0xf800;
23587 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23588 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23589 _("invalid shift value: %ld"), (long) value
);
23590 /* Shifts of zero must be encoded as LSL. */
23592 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23593 /* Shifts of 32 are encoded as zero. */
23594 else if (value
== 32)
23596 newval
|= value
<< 6;
23597 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23600 case BFD_RELOC_VTABLE_INHERIT
:
23601 case BFD_RELOC_VTABLE_ENTRY
:
23605 case BFD_RELOC_ARM_MOVW
:
23606 case BFD_RELOC_ARM_MOVT
:
23607 case BFD_RELOC_ARM_THUMB_MOVW
:
23608 case BFD_RELOC_ARM_THUMB_MOVT
:
23609 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23611 /* REL format relocations are limited to a 16-bit addend. */
23612 if (!fixP
->fx_done
)
23614 if (value
< -0x8000 || value
> 0x7fff)
23615 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23616 _("offset out of range"));
23618 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23619 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23624 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23625 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23627 newval
= get_thumb32_insn (buf
);
23628 newval
&= 0xfbf08f00;
23629 newval
|= (value
& 0xf000) << 4;
23630 newval
|= (value
& 0x0800) << 15;
23631 newval
|= (value
& 0x0700) << 4;
23632 newval
|= (value
& 0x00ff);
23633 put_thumb32_insn (buf
, newval
);
23637 newval
= md_chars_to_number (buf
, 4);
23638 newval
&= 0xfff0f000;
23639 newval
|= value
& 0x0fff;
23640 newval
|= (value
& 0xf000) << 4;
23641 md_number_to_chars (buf
, newval
, 4);
23646 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23647 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23648 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23649 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23650 gas_assert (!fixP
->fx_done
);
23653 bfd_boolean is_mov
;
23654 bfd_vma encoded_addend
= value
;
23656 /* Check that addend can be encoded in instruction. */
23657 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23658 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23659 _("the offset 0x%08lX is not representable"),
23660 (unsigned long) encoded_addend
);
23662 /* Extract the instruction. */
23663 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23664 is_mov
= (insn
& 0xf800) == 0x2000;
23669 if (!seg
->use_rela_p
)
23670 insn
|= encoded_addend
;
23676 /* Extract the instruction. */
23677 /* Encoding is the following
23682 /* The following conditions must be true :
23687 rd
= (insn
>> 4) & 0xf;
23689 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23690 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23691 _("Unable to process relocation for thumb opcode: %lx"),
23692 (unsigned long) insn
);
23694 /* Encode as ADD immediate8 thumb 1 code. */
23695 insn
= 0x3000 | (rd
<< 8);
23697 /* Place the encoded addend into the first 8 bits of the
23699 if (!seg
->use_rela_p
)
23700 insn
|= encoded_addend
;
23703 /* Update the instruction. */
23704 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23708 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23709 case BFD_RELOC_ARM_ALU_PC_G0
:
23710 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23711 case BFD_RELOC_ARM_ALU_PC_G1
:
23712 case BFD_RELOC_ARM_ALU_PC_G2
:
23713 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23714 case BFD_RELOC_ARM_ALU_SB_G0
:
23715 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23716 case BFD_RELOC_ARM_ALU_SB_G1
:
23717 case BFD_RELOC_ARM_ALU_SB_G2
:
23718 gas_assert (!fixP
->fx_done
);
23719 if (!seg
->use_rela_p
)
23722 bfd_vma encoded_addend
;
23723 bfd_vma addend_abs
= abs (value
);
23725 /* Check that the absolute value of the addend can be
23726 expressed as an 8-bit constant plus a rotation. */
23727 encoded_addend
= encode_arm_immediate (addend_abs
);
23728 if (encoded_addend
== (unsigned int) FAIL
)
23729 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23730 _("the offset 0x%08lX is not representable"),
23731 (unsigned long) addend_abs
);
23733 /* Extract the instruction. */
23734 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23736 /* If the addend is positive, use an ADD instruction.
23737 Otherwise use a SUB. Take care not to destroy the S bit. */
23738 insn
&= 0xff1fffff;
23744 /* Place the encoded addend into the first 12 bits of the
23746 insn
&= 0xfffff000;
23747 insn
|= encoded_addend
;
23749 /* Update the instruction. */
23750 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23754 case BFD_RELOC_ARM_LDR_PC_G0
:
23755 case BFD_RELOC_ARM_LDR_PC_G1
:
23756 case BFD_RELOC_ARM_LDR_PC_G2
:
23757 case BFD_RELOC_ARM_LDR_SB_G0
:
23758 case BFD_RELOC_ARM_LDR_SB_G1
:
23759 case BFD_RELOC_ARM_LDR_SB_G2
:
23760 gas_assert (!fixP
->fx_done
);
23761 if (!seg
->use_rela_p
)
23764 bfd_vma addend_abs
= abs (value
);
23766 /* Check that the absolute value of the addend can be
23767 encoded in 12 bits. */
23768 if (addend_abs
>= 0x1000)
23769 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23770 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23771 (unsigned long) addend_abs
);
23773 /* Extract the instruction. */
23774 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23776 /* If the addend is negative, clear bit 23 of the instruction.
23777 Otherwise set it. */
23779 insn
&= ~(1 << 23);
23783 /* Place the absolute value of the addend into the first 12 bits
23784 of the instruction. */
23785 insn
&= 0xfffff000;
23786 insn
|= addend_abs
;
23788 /* Update the instruction. */
23789 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23793 case BFD_RELOC_ARM_LDRS_PC_G0
:
23794 case BFD_RELOC_ARM_LDRS_PC_G1
:
23795 case BFD_RELOC_ARM_LDRS_PC_G2
:
23796 case BFD_RELOC_ARM_LDRS_SB_G0
:
23797 case BFD_RELOC_ARM_LDRS_SB_G1
:
23798 case BFD_RELOC_ARM_LDRS_SB_G2
:
23799 gas_assert (!fixP
->fx_done
);
23800 if (!seg
->use_rela_p
)
23803 bfd_vma addend_abs
= abs (value
);
23805 /* Check that the absolute value of the addend can be
23806 encoded in 8 bits. */
23807 if (addend_abs
>= 0x100)
23808 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23809 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23810 (unsigned long) addend_abs
);
23812 /* Extract the instruction. */
23813 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23815 /* If the addend is negative, clear bit 23 of the instruction.
23816 Otherwise set it. */
23818 insn
&= ~(1 << 23);
23822 /* Place the first four bits of the absolute value of the addend
23823 into the first 4 bits of the instruction, and the remaining
23824 four into bits 8 .. 11. */
23825 insn
&= 0xfffff0f0;
23826 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23828 /* Update the instruction. */
23829 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23833 case BFD_RELOC_ARM_LDC_PC_G0
:
23834 case BFD_RELOC_ARM_LDC_PC_G1
:
23835 case BFD_RELOC_ARM_LDC_PC_G2
:
23836 case BFD_RELOC_ARM_LDC_SB_G0
:
23837 case BFD_RELOC_ARM_LDC_SB_G1
:
23838 case BFD_RELOC_ARM_LDC_SB_G2
:
23839 gas_assert (!fixP
->fx_done
);
23840 if (!seg
->use_rela_p
)
23843 bfd_vma addend_abs
= abs (value
);
23845 /* Check that the absolute value of the addend is a multiple of
23846 four and, when divided by four, fits in 8 bits. */
23847 if (addend_abs
& 0x3)
23848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23849 _("bad offset 0x%08lX (must be word-aligned)"),
23850 (unsigned long) addend_abs
);
23852 if ((addend_abs
>> 2) > 0xff)
23853 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23854 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23855 (unsigned long) addend_abs
);
23857 /* Extract the instruction. */
23858 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23860 /* If the addend is negative, clear bit 23 of the instruction.
23861 Otherwise set it. */
23863 insn
&= ~(1 << 23);
23867 /* Place the addend (divided by four) into the first eight
23868 bits of the instruction. */
23869 insn
&= 0xfffffff0;
23870 insn
|= addend_abs
>> 2;
23872 /* Update the instruction. */
23873 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23877 case BFD_RELOC_ARM_V4BX
:
23878 /* This will need to go in the object file. */
23882 case BFD_RELOC_UNUSED
:
23884 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23885 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23889 /* Translate internal representation of relocation info to BFD target
23893 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23896 bfd_reloc_code_real_type code
;
23898 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23900 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23901 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23902 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23904 if (fixp
->fx_pcrel
)
23906 if (section
->use_rela_p
)
23907 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23909 fixp
->fx_offset
= reloc
->address
;
23911 reloc
->addend
= fixp
->fx_offset
;
23913 switch (fixp
->fx_r_type
)
23916 if (fixp
->fx_pcrel
)
23918 code
= BFD_RELOC_8_PCREL
;
23923 if (fixp
->fx_pcrel
)
23925 code
= BFD_RELOC_16_PCREL
;
23930 if (fixp
->fx_pcrel
)
23932 code
= BFD_RELOC_32_PCREL
;
23936 case BFD_RELOC_ARM_MOVW
:
23937 if (fixp
->fx_pcrel
)
23939 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23943 case BFD_RELOC_ARM_MOVT
:
23944 if (fixp
->fx_pcrel
)
23946 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23950 case BFD_RELOC_ARM_THUMB_MOVW
:
23951 if (fixp
->fx_pcrel
)
23953 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23957 case BFD_RELOC_ARM_THUMB_MOVT
:
23958 if (fixp
->fx_pcrel
)
23960 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23964 case BFD_RELOC_NONE
:
23965 case BFD_RELOC_ARM_PCREL_BRANCH
:
23966 case BFD_RELOC_ARM_PCREL_BLX
:
23967 case BFD_RELOC_RVA
:
23968 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23969 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23970 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23971 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23972 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23973 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23974 case BFD_RELOC_VTABLE_ENTRY
:
23975 case BFD_RELOC_VTABLE_INHERIT
:
23977 case BFD_RELOC_32_SECREL
:
23979 code
= fixp
->fx_r_type
;
23982 case BFD_RELOC_THUMB_PCREL_BLX
:
23984 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23985 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23988 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23991 case BFD_RELOC_ARM_LITERAL
:
23992 case BFD_RELOC_ARM_HWLITERAL
:
23993 /* If this is called then the a literal has
23994 been referenced across a section boundary. */
23995 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23996 _("literal referenced across section boundary"));
24000 case BFD_RELOC_ARM_TLS_CALL
:
24001 case BFD_RELOC_ARM_THM_TLS_CALL
:
24002 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24003 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24004 case BFD_RELOC_ARM_GOT32
:
24005 case BFD_RELOC_ARM_GOTOFF
:
24006 case BFD_RELOC_ARM_GOT_PREL
:
24007 case BFD_RELOC_ARM_PLT32
:
24008 case BFD_RELOC_ARM_TARGET1
:
24009 case BFD_RELOC_ARM_ROSEGREL32
:
24010 case BFD_RELOC_ARM_SBREL32
:
24011 case BFD_RELOC_ARM_PREL31
:
24012 case BFD_RELOC_ARM_TARGET2
:
24013 case BFD_RELOC_ARM_TLS_LDO32
:
24014 case BFD_RELOC_ARM_PCREL_CALL
:
24015 case BFD_RELOC_ARM_PCREL_JUMP
:
24016 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24017 case BFD_RELOC_ARM_ALU_PC_G0
:
24018 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24019 case BFD_RELOC_ARM_ALU_PC_G1
:
24020 case BFD_RELOC_ARM_ALU_PC_G2
:
24021 case BFD_RELOC_ARM_LDR_PC_G0
:
24022 case BFD_RELOC_ARM_LDR_PC_G1
:
24023 case BFD_RELOC_ARM_LDR_PC_G2
:
24024 case BFD_RELOC_ARM_LDRS_PC_G0
:
24025 case BFD_RELOC_ARM_LDRS_PC_G1
:
24026 case BFD_RELOC_ARM_LDRS_PC_G2
:
24027 case BFD_RELOC_ARM_LDC_PC_G0
:
24028 case BFD_RELOC_ARM_LDC_PC_G1
:
24029 case BFD_RELOC_ARM_LDC_PC_G2
:
24030 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24031 case BFD_RELOC_ARM_ALU_SB_G0
:
24032 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24033 case BFD_RELOC_ARM_ALU_SB_G1
:
24034 case BFD_RELOC_ARM_ALU_SB_G2
:
24035 case BFD_RELOC_ARM_LDR_SB_G0
:
24036 case BFD_RELOC_ARM_LDR_SB_G1
:
24037 case BFD_RELOC_ARM_LDR_SB_G2
:
24038 case BFD_RELOC_ARM_LDRS_SB_G0
:
24039 case BFD_RELOC_ARM_LDRS_SB_G1
:
24040 case BFD_RELOC_ARM_LDRS_SB_G2
:
24041 case BFD_RELOC_ARM_LDC_SB_G0
:
24042 case BFD_RELOC_ARM_LDC_SB_G1
:
24043 case BFD_RELOC_ARM_LDC_SB_G2
:
24044 case BFD_RELOC_ARM_V4BX
:
24045 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24046 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24047 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24048 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24049 code
= fixp
->fx_r_type
;
24052 case BFD_RELOC_ARM_TLS_GOTDESC
:
24053 case BFD_RELOC_ARM_TLS_GD32
:
24054 case BFD_RELOC_ARM_TLS_LE32
:
24055 case BFD_RELOC_ARM_TLS_IE32
:
24056 case BFD_RELOC_ARM_TLS_LDM32
:
24057 /* BFD will include the symbol's address in the addend.
24058 But we don't want that, so subtract it out again here. */
24059 if (!S_IS_COMMON (fixp
->fx_addsy
))
24060 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24061 code
= fixp
->fx_r_type
;
24065 case BFD_RELOC_ARM_IMMEDIATE
:
24066 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24067 _("internal relocation (type: IMMEDIATE) not fixed up"));
24070 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24071 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24072 _("ADRL used for a symbol not defined in the same file"));
24075 case BFD_RELOC_ARM_OFFSET_IMM
:
24076 if (section
->use_rela_p
)
24078 code
= fixp
->fx_r_type
;
24082 if (fixp
->fx_addsy
!= NULL
24083 && !S_IS_DEFINED (fixp
->fx_addsy
)
24084 && S_IS_LOCAL (fixp
->fx_addsy
))
24086 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24087 _("undefined local label `%s'"),
24088 S_GET_NAME (fixp
->fx_addsy
));
24092 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24093 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24100 switch (fixp
->fx_r_type
)
24102 case BFD_RELOC_NONE
: type
= "NONE"; break;
24103 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24104 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24105 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24106 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24107 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24108 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24109 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24110 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24111 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24112 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24113 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24114 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24115 default: type
= _("<unknown>"); break;
24117 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24118 _("cannot represent %s relocation in this object file format"),
24125 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24127 && fixp
->fx_addsy
== GOT_symbol
)
24129 code
= BFD_RELOC_ARM_GOTPC
;
24130 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24134 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24136 if (reloc
->howto
== NULL
)
24138 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24139 _("cannot represent %s relocation in this object file format"),
24140 bfd_get_reloc_code_name (code
));
24144 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24145 vtable entry to be used in the relocation's section offset. */
24146 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24147 reloc
->address
= fixp
->fx_offset
;
24152 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24155 cons_fix_new_arm (fragS
* frag
,
24159 bfd_reloc_code_real_type reloc
)
24164 FIXME: @@ Should look at CPU word size. */
24168 reloc
= BFD_RELOC_8
;
24171 reloc
= BFD_RELOC_16
;
24175 reloc
= BFD_RELOC_32
;
24178 reloc
= BFD_RELOC_64
;
24183 if (exp
->X_op
== O_secrel
)
24185 exp
->X_op
= O_symbol
;
24186 reloc
= BFD_RELOC_32_SECREL
;
24190 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24213 arm_force_relocation (struct fix
* fixp
)
24215 #if defined (OBJ_COFF) && defined (TE_PE)
24216 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24220 /* In case we have a call or a branch to a function in ARM ISA mode from
24221 a thumb function or vice-versa force the relocation. These relocations
24222 are cleared off for some cores that might have blx and simple transformations
24226 switch (fixp
->fx_r_type
)
24228 case BFD_RELOC_ARM_PCREL_JUMP
:
24229 case BFD_RELOC_ARM_PCREL_CALL
:
24230 case BFD_RELOC_THUMB_PCREL_BLX
:
24231 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24235 case BFD_RELOC_ARM_PCREL_BLX
:
24236 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24237 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24238 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24239 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24248 /* Resolve these relocations even if the symbol is extern or weak.
24249 Technically this is probably wrong due to symbol preemption.
24250 In practice these relocations do not have enough range to be useful
24251 at dynamic link time, and some code (e.g. in the Linux kernel)
24252 expects these references to be resolved. */
24253 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24254 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24255 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24256 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24257 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24258 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24259 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24260 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24261 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24262 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24263 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24264 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24265 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24266 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24269 /* Always leave these relocations for the linker. */
24270 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24271 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24272 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24275 /* Always generate relocations against function symbols. */
24276 if (fixp
->fx_r_type
== BFD_RELOC_32
24278 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24281 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.	 */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.	 */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
#ifdef OBJ_ELF
/* Return the BFD target name for the current endianness / target
   environment combination.  */

const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#elif defined (TE_NACL)
  return (target_big_endian
	  ? "elf32-bigarm-nacl"
	  : "elf32-littlearm-nacl");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif
24393 /* MD interface: Finalization. */
24398 literal_pool
* pool
;
24400 /* Ensure that all the IT blocks are properly closed. */
24401 check_it_blocks_finished ();
24403 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24405 /* Put it at the end of the relevant section. */
24406 subseg_set (pool
->section
, pool
->sub_section
);
24408 arm_elf_change_section ();
24415 /* Remove any excess mapping symbols generated for alignment frags in
24416 SEC. We may have created a mapping symbol before a zero byte
24417 alignment; remove it if there's a mapping symbol after the
24420 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24421 void *dummy ATTRIBUTE_UNUSED
)
24423 segment_info_type
*seginfo
= seg_info (sec
);
24426 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24429 for (fragp
= seginfo
->frchainP
->frch_root
;
24431 fragp
= fragp
->fr_next
)
24433 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24434 fragS
*next
= fragp
->fr_next
;
24436 /* Variable-sized frags have been converted to fixed size by
24437 this point. But if this was variable-sized to start with,
24438 there will be a fixed-size frag after it. So don't handle
24440 if (sym
== NULL
|| next
== NULL
)
24443 if (S_GET_VALUE (sym
) < next
->fr_address
)
24444 /* Not at the end of this frag. */
24446 know (S_GET_VALUE (sym
) == next
->fr_address
);
24450 if (next
->tc_frag_data
.first_map
!= NULL
)
24452 /* Next frag starts with a mapping symbol. Discard this
24454 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24458 if (next
->fr_next
== NULL
)
24460 /* This mapping symbol is at the end of the section. Discard
24462 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24463 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24467 /* As long as we have empty frags without any mapping symbols,
24469 /* If the next frag is non-empty and does not start with a
24470 mapping symbol, then this mapping symbol is required. */
24471 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24474 next
= next
->fr_next
;
24476 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24563 /* MD interface: Initialization. */
24566 set_constant_flonums (void)
24570 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24571 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24575 /* Auto-select Thumb mode if it's the only available instruction set for the
24576 given architecture. */
24579 autoselect_thumb_from_cpu_variant (void)
24581 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24582 opcode_select (16);
24591 if ( (arm_ops_hsh
= hash_new ()) == NULL
24592 || (arm_cond_hsh
= hash_new ()) == NULL
24593 || (arm_shift_hsh
= hash_new ()) == NULL
24594 || (arm_psr_hsh
= hash_new ()) == NULL
24595 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24596 || (arm_reg_hsh
= hash_new ()) == NULL
24597 || (arm_reloc_hsh
= hash_new ()) == NULL
24598 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24599 as_fatal (_("virtual memory exhausted"));
24601 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24602 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24603 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24604 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24605 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24606 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24607 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24608 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24609 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24610 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24611 (void *) (v7m_psrs
+ i
));
24612 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24613 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24615 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24617 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24618 (void *) (barrier_opt_names
+ i
));
24620 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24622 struct reloc_entry
* entry
= reloc_names
+ i
;
24624 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24625 /* This makes encode_branch() use the EABI versions of this relocation. */
24626 entry
->reloc
= BFD_RELOC_UNUSED
;
24628 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24632 set_constant_flonums ();
24634 /* Set the cpu variant based on the command-line options. We prefer
24635 -mcpu= over -march= if both are set (as for GCC); and we prefer
24636 -mfpu= over any other way of setting the floating point unit.
24637 Use of legacy options with new options are faulted. */
24640 if (mcpu_cpu_opt
|| march_cpu_opt
)
24641 as_bad (_("use of old and new-style options to set CPU type"));
24643 mcpu_cpu_opt
= legacy_cpu
;
24645 else if (!mcpu_cpu_opt
)
24646 mcpu_cpu_opt
= march_cpu_opt
;
24651 as_bad (_("use of old and new-style options to set FPU type"));
24653 mfpu_opt
= legacy_fpu
;
24655 else if (!mfpu_opt
)
24657 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24658 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24659 /* Some environments specify a default FPU. If they don't, infer it
24660 from the processor. */
24662 mfpu_opt
= mcpu_fpu_opt
;
24664 mfpu_opt
= march_fpu_opt
;
24666 mfpu_opt
= &fpu_default
;
24672 if (mcpu_cpu_opt
!= NULL
)
24673 mfpu_opt
= &fpu_default
;
24674 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24675 mfpu_opt
= &fpu_arch_vfp_v2
;
24677 mfpu_opt
= &fpu_arch_fpa
;
24683 mcpu_cpu_opt
= &cpu_default
;
24684 selected_cpu
= cpu_default
;
24686 else if (no_cpu_selected ())
24687 selected_cpu
= cpu_default
;
24690 selected_cpu
= *mcpu_cpu_opt
;
24692 mcpu_cpu_opt
= &arm_arch_any
;
24695 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24697 autoselect_thumb_from_cpu_variant ();
24699 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24701 #if defined OBJ_COFF || defined OBJ_ELF
24703 unsigned int flags
= 0;
24705 #if defined OBJ_ELF
24706 flags
= meabi_flags
;
24708 switch (meabi_flags
)
24710 case EF_ARM_EABI_UNKNOWN
:
24712 /* Set the flags in the private structure. */
24713 if (uses_apcs_26
) flags
|= F_APCS26
;
24714 if (support_interwork
) flags
|= F_INTERWORK
;
24715 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24716 if (pic_code
) flags
|= F_PIC
;
24717 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24718 flags
|= F_SOFT_FLOAT
;
24720 switch (mfloat_abi_opt
)
24722 case ARM_FLOAT_ABI_SOFT
:
24723 case ARM_FLOAT_ABI_SOFTFP
:
24724 flags
|= F_SOFT_FLOAT
;
24727 case ARM_FLOAT_ABI_HARD
:
24728 if (flags
& F_SOFT_FLOAT
)
24729 as_bad (_("hard-float conflicts with specified fpu"));
24733 /* Using pure-endian doubles (even if soft-float). */
24734 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24735 flags
|= F_VFP_FLOAT
;
24737 #if defined OBJ_ELF
24738 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24739 flags
|= EF_ARM_MAVERICK_FLOAT
;
24742 case EF_ARM_EABI_VER4
:
24743 case EF_ARM_EABI_VER5
:
24744 /* No additional flags to set. */
24751 bfd_set_private_flags (stdoutput
, flags
);
24753 /* We have run out flags in the COFF header to encode the
24754 status of ATPCS support, so instead we create a dummy,
24755 empty, debug section called .arm.atpcs. */
24760 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24764 bfd_set_section_flags
24765 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24766 bfd_set_section_size (stdoutput
, sec
, 0);
24767 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24773 /* Record the CPU type as well. */
24774 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24775 mach
= bfd_mach_arm_iWMMXt2
;
24776 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24777 mach
= bfd_mach_arm_iWMMXt
;
24778 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24779 mach
= bfd_mach_arm_XScale
;
24780 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24781 mach
= bfd_mach_arm_ep9312
;
24782 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24783 mach
= bfd_mach_arm_5TE
;
24784 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24786 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24787 mach
= bfd_mach_arm_5T
;
24789 mach
= bfd_mach_arm_5
;
24791 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24793 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24794 mach
= bfd_mach_arm_4T
;
24796 mach
= bfd_mach_arm_4
;
24798 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24799 mach
= bfd_mach_arm_3M
;
24800 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24801 mach
= bfd_mach_arm_3
;
24802 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24803 mach
= bfd_mach_arm_2a
;
24804 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24805 mach
= bfd_mach_arm_2
;
24807 mach
= bfd_mach_arm_unknown
;
24809 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24812 /* Command line processing. */
24815 Invocation line includes a switch not recognized by the base assembler.
24816 See if it's a processor-specific option.
24818 This routine is somewhat complicated by the need for backwards
24819 compatibility (since older releases of gcc can't be changed).
24820 The new options try to make the interface as compatible as
24823 New options (supported) are:
24825 -mcpu=<cpu name> Assemble for selected processor
24826 -march=<architecture name> Assemble for selected architecture
24827 -mfpu=<fpu architecture> Assemble for selected FPU.
24828 -EB/-mbig-endian Big-endian
24829 -EL/-mlittle-endian Little-endian
24830 -k Generate PIC code
24831 -mthumb Start in Thumb mode
24832 -mthumb-interwork Code supports ARM/Thumb interworking
24834 -m[no-]warn-deprecated Warn about deprecated features
24835 -m[no-]warn-syms Warn when symbols match instructions
24837 For now we will also provide support for:
24839 -mapcs-32 32-bit Program counter
24840 -mapcs-26 26-bit Program counter
24841 -mapcs-float Floats passed in FP registers
24842 -mapcs-reentrant Reentrant code
24844 (sometime these will probably be replaced with -mapcs=<list of options>
24845 and -matpcs=<list of options>)
24847 The remaining options are only supported for back-wards compatibility.
24848 Cpu variants, the arm part is optional:
24849 -m[arm]1 Currently not supported.
24850 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24851 -m[arm]3 Arm 3 processor
24852 -m[arm]6[xx], Arm 6 processors
24853 -m[arm]7[xx][t][[d]m] Arm 7 processors
24854 -m[arm]8[10] Arm 8 processors
24855 -m[arm]9[20][tdmi] Arm 9 processors
24856 -mstrongarm[110[0]] StrongARM processors
24857 -mxscale XScale processors
24858 -m[arm]v[2345[t[e]]] Arm architectures
24859 -mall All (except the ARM1)
24861 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24862 -mfpe-old (No float load/store multiples)
24863 -mvfpxd VFP Single precision
24865 -mno-fpu Disable all floating point instructions
24867 The following CPU names are recognized:
24868 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24869 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24870 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24871 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24872 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24873 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24874 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24878 const char * md_shortopts
= "m:k";
24880 #ifdef ARM_BI_ENDIAN
24881 #define OPTION_EB (OPTION_MD_BASE + 0)
24882 #define OPTION_EL (OPTION_MD_BASE + 1)
24884 #if TARGET_BYTES_BIG_ENDIAN
24885 #define OPTION_EB (OPTION_MD_BASE + 0)
24887 #define OPTION_EL (OPTION_MD_BASE + 1)
24890 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24892 struct option md_longopts
[] =
24895 {"EB", no_argument
, NULL
, OPTION_EB
},
24898 {"EL", no_argument
, NULL
, OPTION_EL
},
24900 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24901 {NULL
, no_argument
, NULL
, 0}
24905 size_t md_longopts_size
= sizeof (md_longopts
);
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int	     *var;		/* Variable to change.	*/
  int	      value;		/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
24916 struct arm_option_table arm_opts
[] =
24918 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24919 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24920 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24921 &support_interwork
, 1, NULL
},
24922 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24923 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24924 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24926 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24927 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24928 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24929 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24932 /* These are recognized by the assembler, but have no affect on code. */
24933 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24934 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24936 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24937 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24938 &warn_on_deprecated
, 0, NULL
},
24939 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24940 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24941 {NULL
, NULL
, NULL
, 0, NULL
}
24944 struct arm_legacy_option_table
24946 const char *option
; /* Option name to match. */
24947 const arm_feature_set
**var
; /* Variable to change. */
24948 const arm_feature_set value
; /* What to change it to. */
24949 const char *deprecated
; /* If non-null, print this message. */
24952 const struct arm_legacy_option_table arm_legacy_opts
[] =
24954 /* DON'T add any new processors to this list -- we want the whole list
24955 to go away... Add them to the processors table instead. */
24956 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24957 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24958 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24959 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24960 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24961 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24962 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24963 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24964 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24965 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24966 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24967 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24968 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24969 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24970 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24971 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24972 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24973 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24974 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24975 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24976 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24977 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24978 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24979 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24980 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24981 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24982 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24983 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24984 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24985 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24986 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24987 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24988 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24989 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24990 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24991 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24992 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24993 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24994 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24995 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24996 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24997 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24998 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24999 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25000 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25001 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25002 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25003 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25004 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25005 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25006 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25007 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25008 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25009 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25010 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25011 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25012 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25013 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25014 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25015 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25016 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25017 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25018 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25019 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25020 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25021 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25022 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25023 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25024 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25025 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25026 N_("use -mcpu=strongarm110")},
25027 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25028 N_("use -mcpu=strongarm1100")},
25029 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25030 N_("use -mcpu=strongarm1110")},
25031 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25032 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25033 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25035 /* Architecture variants -- don't add any more to this list either. */
25036 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25037 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25038 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25039 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25040 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25041 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25042 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25043 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25044 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25045 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25046 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25047 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25048 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25049 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25050 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25051 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25052 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25053 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25055 /* Floating point variants -- don't add any more to this list either. */
25056 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25057 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25058 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25059 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25060 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25062 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25065 struct arm_cpu_option_table
25069 const arm_feature_set value
;
25070 /* For some CPUs we assume an FPU unless the user explicitly sets
25072 const arm_feature_set default_fpu
;
25073 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25075 const char *canonical_name
;
25078 /* This list should, at a minimum, contain all the cpu names
25079 recognized by GCC. */
25080 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25081 static const struct arm_cpu_option_table arm_cpus
[] =
25083 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
25084 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
25085 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
25086 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25087 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25088 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25089 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25090 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25091 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25092 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25093 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25094 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25095 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25096 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25097 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25098 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25099 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25100 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25101 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25102 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25103 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25104 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25105 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25106 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25107 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25108 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25109 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25110 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25111 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25112 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25113 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25114 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25115 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25116 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25117 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25118 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25119 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25120 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25121 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25122 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
25123 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25124 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25125 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25126 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25127 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25128 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25129 /* For V5 or later processors we default to using VFP; but the user
25130 should really set the FPU type explicitly. */
25131 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25132 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25133 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25134 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25135 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25136 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25137 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
25138 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25139 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25140 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
25141 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25142 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25143 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25144 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25145 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25146 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
25147 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25148 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25149 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25150 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
25152 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25153 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25154 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25155 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25156 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25157 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25158 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
25159 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
25160 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
25162 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
25163 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
25164 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
25165 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
25166 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
25167 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
25168 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
25169 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
25170 FPU_NONE
, "Cortex-A5"),
25171 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25173 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
25174 ARM_FEATURE_COPROC (FPU_VFP_V3
25175 | FPU_NEON_EXT_V1
),
25177 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
25178 ARM_FEATURE_COPROC (FPU_VFP_V3
25179 | FPU_NEON_EXT_V1
),
25181 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25183 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25185 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25187 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25189 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25191 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25193 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25195 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25197 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
25198 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
25200 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
25201 FPU_NONE
, "Cortex-R5"),
25202 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
25203 FPU_ARCH_VFP_V3D16
,
25205 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
25206 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
25207 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
25208 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
25209 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
25210 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
25211 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25214 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25218 /* ??? XSCALE is really an architecture. */
25219 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25220 /* ??? iwmmxt is not a processor. */
25221 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
25222 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
25223 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25225 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25226 FPU_ARCH_MAVERICK
, "ARM920T"),
25227 /* Marvell processors. */
25228 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25230 ARM_EXT2_V6T2_V8M
),
25231 FPU_ARCH_VFP_V3D16
, NULL
),
25232 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25234 ARM_EXT2_V6T2_V8M
),
25235 FPU_ARCH_NEON_VFP_V4
, NULL
),
25236 /* APM X-Gene family. */
25237 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25239 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25242 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25246 struct arm_arch_option_table
25250 const arm_feature_set value
;
25251 const arm_feature_set default_fpu
;
25254 /* This list should, at a minimum, contain all the architecture names
25255 recognized by GCC. */
25256 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25257 static const struct arm_arch_option_table arm_archs
[] =
25259 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25260 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25261 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25262 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25263 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25264 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25265 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25266 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25267 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25268 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25269 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25270 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25271 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25272 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25273 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25274 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25275 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25276 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25277 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25278 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25279 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25280 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25281 kept to preserve existing behaviour. */
25282 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25283 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25284 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25285 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25286 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25287 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25288 kept to preserve existing behaviour. */
25289 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25290 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25291 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25292 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25293 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25294 /* The official spelling of the ARMv7 profile variants is the dashed form.
25295 Accept the non-dashed form for compatibility with old toolchains. */
25296 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25297 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25298 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25299 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25300 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25301 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25302 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25303 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25304 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25305 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25306 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25307 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25308 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25309 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25310 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25311 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25312 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25314 #undef ARM_ARCH_OPT
25316 /* ISA extensions in the co-processor and main instruction set space. */
25317 struct arm_option_extension_value_table
25321 const arm_feature_set merge_value
;
25322 const arm_feature_set clear_value
;
25323 const arm_feature_set allowed_archs
;
25326 /* The following table must be in alphabetical order with a NULL last entry.
25328 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25329 static const struct arm_option_extension_value_table arm_extensions
[] =
25331 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25332 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25333 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25334 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25335 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25336 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25337 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25338 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25339 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25341 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25342 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25343 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25344 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25345 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
25346 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25347 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
25348 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25349 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
25350 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25351 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25352 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25353 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25354 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25355 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25356 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25357 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25358 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25359 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25360 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25361 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25362 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25363 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25364 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
25365 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25367 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25368 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25369 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8
,
25370 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25371 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25372 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25373 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
25374 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25378 /* ISA floating-point and Advanced SIMD extensions. */
25379 struct arm_option_fpu_value_table
25382 const arm_feature_set value
;
25385 /* This list should, at a minimum, contain all the fpu names
25386 recognized by GCC. */
25387 static const struct arm_option_fpu_value_table arm_fpus
[] =
25389 {"softfpa", FPU_NONE
},
25390 {"fpe", FPU_ARCH_FPE
},
25391 {"fpe2", FPU_ARCH_FPE
},
25392 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25393 {"fpa", FPU_ARCH_FPA
},
25394 {"fpa10", FPU_ARCH_FPA
},
25395 {"fpa11", FPU_ARCH_FPA
},
25396 {"arm7500fe", FPU_ARCH_FPA
},
25397 {"softvfp", FPU_ARCH_VFP
},
25398 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25399 {"vfp", FPU_ARCH_VFP_V2
},
25400 {"vfp9", FPU_ARCH_VFP_V2
},
25401 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25402 {"vfp10", FPU_ARCH_VFP_V2
},
25403 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25404 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25405 {"vfpv2", FPU_ARCH_VFP_V2
},
25406 {"vfpv3", FPU_ARCH_VFP_V3
},
25407 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25408 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25409 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25410 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25411 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25412 {"arm1020t", FPU_ARCH_VFP_V1
},
25413 {"arm1020e", FPU_ARCH_VFP_V2
},
25414 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25415 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25416 {"maverick", FPU_ARCH_MAVERICK
},
25417 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25418 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25419 {"vfpv4", FPU_ARCH_VFP_V4
},
25420 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25421 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25422 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25423 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25424 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25425 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25426 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25427 {"crypto-neon-fp-armv8",
25428 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25429 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25430 {"crypto-neon-fp-armv8.1",
25431 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25432 {NULL
, ARM_ARCH_NONE
}
25435 struct arm_option_value_table
25441 static const struct arm_option_value_table arm_float_abis
[] =
25443 {"hard", ARM_FLOAT_ABI_HARD
},
25444 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25445 {"soft", ARM_FLOAT_ABI_SOFT
},
25450 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25451 static const struct arm_option_value_table arm_eabis
[] =
25453 {"gnu", EF_ARM_EABI_UNKNOWN
},
25454 {"4", EF_ARM_EABI_VER4
},
25455 {"5", EF_ARM_EABI_VER5
},
/* Description of a long option that takes a sub-argument, decoded by a
   callback (e.g. -mcpu=<name>).  OPTION is matched as a prefix.  */
struct arm_long_option_table
{
  const char * option;		/* Substring to match.  */
  const char * help;		/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25469 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
25471 arm_feature_set
*ext_set
= (arm_feature_set
*)
25472 xmalloc (sizeof (arm_feature_set
));
25474 /* We insist on extensions being specified in alphabetical order, and with
25475 extensions being added before being removed. We achieve this by having
25476 the global ARM_EXTENSIONS table in alphabetical order, and using the
25477 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25478 or removing it (0) and only allowing it to change in the order
25480 const struct arm_option_extension_value_table
* opt
= NULL
;
25481 int adding_value
= -1;
25483 /* Copy the feature set, so that we can modify it. */
25484 *ext_set
= **opt_p
;
25487 while (str
!= NULL
&& *str
!= 0)
25494 as_bad (_("invalid architectural extension"));
25499 ext
= strchr (str
, '+');
25504 len
= strlen (str
);
25506 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25508 if (adding_value
!= 0)
25511 opt
= arm_extensions
;
25519 if (adding_value
== -1)
25522 opt
= arm_extensions
;
25524 else if (adding_value
!= 1)
25526 as_bad (_("must specify extensions to add before specifying "
25527 "those to remove"));
25534 as_bad (_("missing architectural extension"));
25538 gas_assert (adding_value
!= -1);
25539 gas_assert (opt
!= NULL
);
25541 /* Scan over the options table trying to find an exact match. */
25542 for (; opt
->name
!= NULL
; opt
++)
25543 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25545 /* Check we can apply the extension to this architecture. */
25546 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
25548 as_bad (_("extension does not apply to the base architecture"));
25552 /* Add or remove the extension. */
25554 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25556 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25561 if (opt
->name
== NULL
)
25563 /* Did we fail to find an extension because it wasn't specified in
25564 alphabetical order, or because it does not exist? */
25566 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25567 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25570 if (opt
->name
== NULL
)
25571 as_bad (_("unknown architectural extension `%s'"), str
);
25573 as_bad (_("architectural extensions must be specified in "
25574 "alphabetical order"));
25580 /* We should skip the extension we've just matched the next time
25592 arm_parse_cpu (char *str
)
25594 const struct arm_cpu_option_table
*opt
;
25595 char *ext
= strchr (str
, '+');
25601 len
= strlen (str
);
25605 as_bad (_("missing cpu name `%s'"), str
);
25609 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25610 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25612 mcpu_cpu_opt
= &opt
->value
;
25613 mcpu_fpu_opt
= &opt
->default_fpu
;
25614 if (opt
->canonical_name
)
25616 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25617 strcpy (selected_cpu_name
, opt
->canonical_name
);
25623 if (len
>= sizeof selected_cpu_name
)
25624 len
= (sizeof selected_cpu_name
) - 1;
25626 for (i
= 0; i
< len
; i
++)
25627 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25628 selected_cpu_name
[i
] = 0;
25632 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25637 as_bad (_("unknown cpu `%s'"), str
);
25642 arm_parse_arch (char *str
)
25644 const struct arm_arch_option_table
*opt
;
25645 char *ext
= strchr (str
, '+');
25651 len
= strlen (str
);
25655 as_bad (_("missing architecture name `%s'"), str
);
25659 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25660 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25662 march_cpu_opt
= &opt
->value
;
25663 march_fpu_opt
= &opt
->default_fpu
;
25664 strcpy (selected_cpu_name
, opt
->name
);
25667 return arm_parse_extension (ext
, &march_cpu_opt
);
25672 as_bad (_("unknown architecture `%s'\n"), str
);
/* Parse the value of a -mfpu= command-line option: exact-match STR
   against the arm_fpus table and point mfpu_opt at the matching feature
   set; as_bad on an unknown name.  */
25677 arm_parse_fpu (char * str
)
25679 const struct arm_option_fpu_value_table
* opt
;
25681 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25682 if (streq (opt
->name
, str
))
25684 mfpu_opt
= &opt
->value
;
25688 as_bad (_("unknown floating point format `%s'\n"), str
);
/* Parse the value of a -mfloat-abi= command-line option: exact-match
   STR against arm_float_abis and store the value (by value, unlike the
   pointer stored by arm_parse_fpu) in mfloat_abi_opt.  */
25693 arm_parse_float_abi (char * str
)
25695 const struct arm_option_value_table
* opt
;
25697 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25698 if (streq (opt
->name
, str
))
25700 mfloat_abi_opt
= opt
->value
;
25704 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* Parse the value of a -meabi= command-line option: exact-match STR
   against arm_eabis and store the chosen version in meabi_flags.  */
25710 arm_parse_eabi (char * str
)
25712 const struct arm_option_value_table
*opt
;
25714 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25715 if (streq (opt
->name
, str
))
25717 meabi_flags
= opt
->value
;
25720 as_bad (_("unknown EABI `%s'\n"), str
);
/* Parse the value of a -mimplicit-it= command-line option.  Maps the
   four accepted keywords onto the IMPLICIT_IT_MODE_* constants stored
   in implicit_it_mode, diagnosing anything else.  Returns a
   bfd_boolean success flag (initialised TRUE; the failing path is among
   the lines elided by this extraction).  */
25726 arm_parse_it_mode (char * str
)
25728 bfd_boolean ret
= TRUE
;
25730 if (streq ("arm", str
))
25731 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25732 else if (streq ("thumb", str
))
25733 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25734 else if (streq ("always", str
))
25735 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25736 else if (streq ("never", str
))
25737 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25740 as_bad (_("unknown implicit IT mode `%s', should be "\
25741 "arm, thumb, always, or never."), str
);
/* Handler for the -mccs command-line option: enable TI CodeComposer
   Studio syntax compatibility by flipping codecomposer_syntax and
   switching the comment/line-separator character tables (';' becomes a
   comment character rather than a statement separator).  */
25749 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25751 codecomposer_syntax
= TRUE
;
25752 arm_comment_chars
[0] = ';';
25753 arm_line_separator_chars
[0] = 0;
/* Table of long-form ARM command-line options: option prefix, help
   string, parser callback, and a deprecation message (NULL when the
   option is current).  Scanned by md_parse_option and printed by
   md_show_usage; NULL-terminated.  */
25757 struct arm_long_option_table arm_long_opts
[] =
25759 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25760 arm_parse_cpu
, NULL
},
25761 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25762 arm_parse_arch
, NULL
},
25763 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25764 arm_parse_fpu
, NULL
},
25765 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25766 arm_parse_float_abi
, NULL
},
25768 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25769 arm_parse_eabi
, NULL
},
25771 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25772 arm_parse_it_mode
, NULL
},
25773 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25774 arm_ccs_mode
, NULL
},
25775 {NULL
, NULL
, 0, NULL
}
/* GAS target hook: process one target-specific command-line option.
   Handles -EB/-EL endianness and OPTION_FIX_V4BX directly, then scans
   three tables in turn: arm_opts (simple flag options), arm_legacy_opts
   (deprecated short options), and arm_long_opts (options with string
   arguments, dispatched to a sub-parser).  Deprecated matches emit an
   as_tsktsk diagnostic first.  NOTE(review): case labels, braces and
   return statements are elided by this extraction; text kept verbatim.  */
25779 md_parse_option (int c
, char * arg
)
25781 struct arm_option_table
*opt
;
25782 const struct arm_legacy_option_table
*fopt
;
25783 struct arm_long_option_table
*lopt
;
25789 target_big_endian
= 1;
25795 target_big_endian
= 0;
25799 case OPTION_FIX_V4BX
:
25804 /* Listing option. Just ignore these, we don't support additional
/* First table: simple one-character (or exact-string) flag options.  */
25809 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25811 if (c
== opt
->option
[0]
25812 && ((arg
== NULL
&& opt
->option
[1] == 0)
25813 || streq (arg
, opt
->option
+ 1)))
25815 /* If the option is deprecated, tell the user. */
25816 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25817 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25818 arg
? arg
: "", _(opt
->deprecated
));
25820 if (opt
->var
!= NULL
)
25821 *opt
->var
= opt
->value
;
/* Second table: legacy options whose variable stores a pointer to the
   table entry's value rather than the value itself.  */
25827 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25829 if (c
== fopt
->option
[0]
25830 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25831 || streq (arg
, fopt
->option
+ 1)))
25833 /* If the option is deprecated, tell the user. */
25834 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25835 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25836 arg
? arg
: "", _(fopt
->deprecated
));
25838 if (fopt
->var
!= NULL
)
25839 *fopt
->var
= &fopt
->value
;
/* Third table: long options with arguments, matched by prefix.  */
25845 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25847 /* These options are expected to have an argument. */
25848 if (c
== lopt
->option
[0]
25850 && strncmp (arg
, lopt
->option
+ 1,
25851 strlen (lopt
->option
+ 1)) == 0)
25853 /* If the option is deprecated, tell the user. */
25854 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25855 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25856 _(lopt
->deprecated
));
25858 /* Call the sub-option parser. */
25859 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
/* GAS target hook: print ARM-specific option help to FP.  Walks the
   short-option and long-option tables printing each entry's help
   string, then the fixed -EB/-EL/--fix-v4bx lines (whose surrounding
   fprintf calls are partially elided by this extraction).  */
25870 md_show_usage (FILE * fp
)
25872 struct arm_option_table
*opt
;
25873 struct arm_long_option_table
*lopt
;
25875 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25877 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25878 if (opt
->help
!= NULL
)
25879 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25881 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25882 if (lopt
->help
!= NULL
)
25883 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25887 -EB assemble code for a big-endian cpu\n"));
25892 -EL assemble code for a little-endian cpu\n"));
25896 --fix-v4bx Allow BX in ARMv4 code\n"));
/* Tail of the cpu_arch_ver_table typedef (the opening of the struct,
   with its integer Tag_CPU_arch value field, is among the lines elided
   by this extraction) followed by the feature-set -> EABI arch-value
   mapping table it describes.  The first integer of each entry is the
   Tag_CPU_arch attribute value for the feature set beside it.  */
25904 arm_feature_set flags
;
25905 } cpu_arch_ver_table
;
25907 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
25908 must be sorted least features first but some reordering is needed, eg. for
25909 Thumb-2 instructions to be detected as coming from ARMv6T2. */
25910 static const cpu_arch_ver_table cpu_arch_ver
[] =
25916 {4, ARM_ARCH_V5TE
},
25917 {5, ARM_ARCH_V5TEJ
},
25921 {11, ARM_ARCH_V6M
},
25922 {12, ARM_ARCH_V6SM
},
25923 {8, ARM_ARCH_V6T2
},
25924 {10, ARM_ARCH_V7VE
},
25925 {10, ARM_ARCH_V7R
},
25926 {10, ARM_ARCH_V7M
},
25927 {14, ARM_ARCH_V8A
},
25928 {16, ARM_ARCH_V8M_BASE
},
25929 {17, ARM_ARCH_V8M_MAIN
},
25933 /* Set an attribute if it has not already been set by the user. */
/* Integer variant: forwards TAG/VALUE to bfd_elf_add_proc_attr_int
   unless the user already set the tag explicitly (tracked in
   attributes_set_explicitly for tags below NUM_KNOWN_OBJ_ATTRIBUTES;
   out-of-range tags are always written).  */
25935 aeabi_set_attribute_int (int tag
, int value
)
25938 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25939 || !attributes_set_explicitly
[tag
])
25940 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
/* String variant of aeabi_set_attribute_int: same explicit-set guard,
   but emits via bfd_elf_add_proc_attr_string.  */
25944 aeabi_set_attribute_string (int tag
, const char *value
)
25947 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25948 || !attributes_set_explicitly
[tag
])
25949 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25952 /* Set the public EABI object attributes. */
/* Computes the merged feature set actually used by the assembled code
   (arch used in ARM and Thumb state, selected FPU, selected CPU), maps
   it through cpu_arch_ver to a Tag_CPU_arch value with several special
   cases (v7E-M vs v7, v8-M Mainline vs Baseline, -march=all), and then
   emits the public build attributes (Tag_CPU_name, Tag_CPU_arch,
   Tag_CPU_arch_profile, ISA-use, VFP/WMMX/SIMD arch, DIV use, MP and
   virtualization extensions) via the aeabi_set_attribute_* helpers.
   NOTE(review): many structural lines (braces, `arch`/`profile`
   declarations, else branches) are elided by this extraction; the
   surviving text is kept verbatim.  */
25954 aeabi_set_public_attributes (void)
25959 int fp16_optional
= 0;
25960 arm_feature_set flags
;
25961 arm_feature_set tmp
;
25962 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
25963 const cpu_arch_ver_table
*p
;
25965 /* Choose the architecture based on the capabilities of the requested cpu
25966 (if any) and/or the instructions actually used. */
25967 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25968 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25969 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25971 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25972 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25974 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25975 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25977 selected_cpu
= flags
;
25979 /* Allow the user to override the reported architecture. */
25982 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25983 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25986 /* We need to make sure that the attributes do not identify us as v6S-M
25987 when the only v6S-M feature in use is the Operating System Extensions. */
25988 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25989 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25990 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
/* Walk cpu_arch_ver accumulating the last entry that contributes a
   new feature; tmp tracks the features not yet accounted for.  */
25994 for (p
= cpu_arch_ver
; p
->val
; p
++)
25996 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25999 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
26003 /* The table lookup above finds the last architecture to contribute
26004 a new feature. Unfortunately, Tag13 is a subset of the union of
26005 v6T2 and v7-M, so it is never seen as contributing a new feature.
26006 We can not search for the last entry which is entirely used,
26007 because if no CPU is specified we build up only those flags
26008 actually used. Perhaps we should separate out the specified
26009 and implicit cases. Avoid taking this path for -march=all by
26010 checking for contradictory v7-A / v7-M features. */
26011 if (arch
== TAG_CPU_ARCH_V7
26012 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26013 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
26014 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
26015 arch
= TAG_CPU_ARCH_V7E_M
;
26017 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
26018 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
26019 arch
= TAG_CPU_ARCH_V8M_MAIN
;
26021 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26022 coming from ARMv8-A. However, since ARMv8-A has more instructions than
26023 ARMv8-M, -march=all must be detected as ARMv8-A. */
26024 if (arch
== TAG_CPU_ARCH_V8M_MAIN
26025 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
26026 arch
= TAG_CPU_ARCH_V8
;
26028 /* Tag_CPU_name. */
26029 if (selected_cpu_name
[0])
26033 q
= selected_cpu_name
;
26034 if (strncmp (q
, "armv", 4) == 0)
26039 for (i
= 0; q
[i
]; i
++)
26040 q
[i
] = TOUPPER (q
[i
]);
26042 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26045 /* Tag_CPU_arch. */
26046 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26048 /* Tag_CPU_arch_profile. */
26049 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26050 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26051 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
26052 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
)))
26054 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
26056 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
26061 if (profile
!= '\0')
26062 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26064 /* Tag_ARM_ISA_use. */
26065 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
26067 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
26069 /* Tag_THUMB_ISA_use. */
26070 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
26075 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26076 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26078 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
26082 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
26085 /* Tag_VFP_arch. */
26086 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
26087 aeabi_set_attribute_int (Tag_VFP_arch
,
26088 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26090 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
26091 aeabi_set_attribute_int (Tag_VFP_arch
,
26092 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26094 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
26097 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
26099 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
26101 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
26104 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
26105 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
26106 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
26107 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
26108 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
26110 /* Tag_ABI_HardFP_use. */
26111 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
26112 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
26113 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
26115 /* Tag_WMMX_arch. */
26116 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
26117 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
26118 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
26119 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
26121 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
26122 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
26123 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
26124 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
26126 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
26128 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
26132 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
26137 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
26138 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
26139 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
26143 We set Tag_DIV_use to two when integer divide instructions have been used
26144 in ARM state, or when Thumb integer divide instructions have been used,
26145 but we have no architecture profile set, nor have we any ARM instructions.
26147 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26148 by the base architecture.
26150 For new architectures we will have to check these tests. */
26151 gas_assert (arch
<= TAG_CPU_ARCH_V8
26152 || (arch
>= TAG_CPU_ARCH_V8M_BASE
26153 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
26154 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26155 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26156 aeabi_set_attribute_int (Tag_DIV_use
, 0);
26157 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
26158 || (profile
== '\0'
26159 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
26160 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
26161 aeabi_set_attribute_int (Tag_DIV_use
, 2);
26163 /* Tag_MP_extension_use. */
26164 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
26165 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
26167 /* Tag Virtualization_use. */
26168 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
26170 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
26173 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
)
;
26176 /* Add the default contents for the .ARM.attributes section. */
/* End-of-assembly hook fragment: for EABI version 4 or later the
   public attributes are emitted; the function header and surrounding
   lines are elided by this extraction.  */
26180 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26183 aeabi_set_public_attributes ();
26185 #endif /* OBJ_ELF */
26188 /* Parse a .cpu directive. */
/* Reads the CPU name from the input line (temporarily NUL-terminating
   it and restoring the saved character afterwards), looks it up in
   arm_cpus, updates mcpu_cpu_opt / selected_cpu / selected_cpu_name,
   and recomputes cpu_variant; diagnoses an unknown name.  */
26191 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
26193 const struct arm_cpu_option_table
*opt
;
26197 name
= input_line_pointer
;
26198 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26199 input_line_pointer
++;
26200 saved_char
= *input_line_pointer
;
26201 *input_line_pointer
= 0;
26203 /* Skip the first "all" entry. */
26204 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26205 if (streq (opt
->name
, name
))
26207 mcpu_cpu_opt
= &opt
->value
;
26208 selected_cpu
= opt
->value
;
26209 if (opt
->canonical_name
)
26210 strcpy (selected_cpu_name
, opt
->canonical_name
);
26214 for (i
= 0; opt
->name
[i
]; i
++)
26215 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26217 selected_cpu_name
[i
] = 0;
26219 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26220 *input_line_pointer
= saved_char
;
26221 demand_empty_rest_of_line ();
/* Fall-through failure path.  */
26224 as_bad (_("unknown cpu `%s'"), name
);
26225 *input_line_pointer
= saved_char
;
26226 ignore_rest_of_line ();
26230 /* Parse a .arch directive. */
/* Same shape as s_arm_cpu but matches against arm_archs and always
   copies opt->name (architectures have no canonical-name variant
   here).  */
26233 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26235 const struct arm_arch_option_table
*opt
;
26239 name
= input_line_pointer
;
26240 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26241 input_line_pointer
++;
26242 saved_char
= *input_line_pointer
;
26243 *input_line_pointer
= 0;
26245 /* Skip the first "all" entry. */
26246 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26247 if (streq (opt
->name
, name
))
26249 mcpu_cpu_opt
= &opt
->value
;
26250 selected_cpu
= opt
->value
;
26251 strcpy (selected_cpu_name
, opt
->name
);
26252 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26253 *input_line_pointer
= saved_char
;
26254 demand_empty_rest_of_line ();
26258 as_bad (_("unknown architecture `%s'\n"), name
);
26259 *input_line_pointer
= saved_char
;
26260 ignore_rest_of_line ();
26264 /* Parse a .object_arch directive. */
/* Records a user override for the architecture reported in the object
   attributes (object_arch); unlike .arch it does not change the
   instruction set actually accepted.  */
26267 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26269 const struct arm_arch_option_table
*opt
;
26273 name
= input_line_pointer
;
26274 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26275 input_line_pointer
++;
26276 saved_char
= *input_line_pointer
;
26277 *input_line_pointer
= 0;
26279 /* Skip the first "all" entry. */
26280 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26281 if (streq (opt
->name
, name
))
26283 object_arch
= &opt
->value
;
26284 *input_line_pointer
= saved_char
;
26285 demand_empty_rest_of_line ();
26289 as_bad (_("unknown architecture `%s'\n"), name
);
26290 *input_line_pointer
= saved_char
;
26291 ignore_rest_of_line ();
26294 /* Parse a .arch_extension directive. */
/* Adds or (with a "no" prefix) removes an architectural extension from
   the current CPU selection.  The extension must be permitted for the
   current base architecture (opt->allowed_archs); on success
   selected_cpu is merged/cleared with the extension's feature bits and
   cpu_variant recomputed.  */
26297 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26299 const struct arm_option_extension_value_table
*opt
;
26302 int adding_value
= 1;
26304 name
= input_line_pointer
;
26305 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26306 input_line_pointer
++;
26307 saved_char
= *input_line_pointer
;
26308 *input_line_pointer
= 0;
/* A leading "no" flips the directive into removal mode (handling of
   adding_value for that case is among the elided lines).  */
26310 if (strlen (name
) >= 2
26311 && strncmp (name
, "no", 2) == 0)
26317 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26318 if (streq (opt
->name
, name
))
26320 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
26322 as_bad (_("architectural extension `%s' is not allowed for the "
26323 "current base architecture"), name
);
26328 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26331 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26333 mcpu_cpu_opt
= &selected_cpu
;
26334 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26335 *input_line_pointer
= saved_char
;
26336 demand_empty_rest_of_line ();
26340 if (opt
->name
== NULL
)
26341 as_bad (_("unknown architecture extension `%s'\n"), name
);
26343 *input_line_pointer
= saved_char
;
26344 ignore_rest_of_line ();
26347 /* Parse a .fpu directive. */
/* Selects the FPU feature set by name from arm_fpus, updating mfpu_opt
   and recomputing cpu_variant; same temporary-NUL parsing pattern as
   the other directive handlers.  */
26350 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26352 const struct arm_option_fpu_value_table
*opt
;
26356 name
= input_line_pointer
;
26357 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26358 input_line_pointer
++;
26359 saved_char
= *input_line_pointer
;
26360 *input_line_pointer
= 0;
26362 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26363 if (streq (opt
->name
, name
))
26365 mfpu_opt
= &opt
->value
;
26366 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26367 *input_line_pointer
= saved_char
;
26368 demand_empty_rest_of_line ();
26372 as_bad (_("unknown floating point format `%s'\n"), name
);
26373 *input_line_pointer
= saved_char
;
26374 ignore_rest_of_line ();
26377 /* Copy symbol information. */
/* Propagates the ARM-specific symbol flag bits (ARM/Thumb marking)
   from SRC to DEST.  */
26380 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26382 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26386 /* Given a symbolic attribute NAME, return the proper integer value.
26387 Returns -1 if the attribute is not known. */
26390 arm_convert_symbolic_attribute (const char *name
)
26392 static const struct
26397 attribute_table
[] =
26399 /* When you modify this table you should
26400 also modify the list in doc/c-arm.texi. */
/* T() stringizes the tag macro so the table maps "Tag_X" -> Tag_X.  */
26401 #define T(tag) {#tag, tag}
26402 T (Tag_CPU_raw_name
),
26405 T (Tag_CPU_arch_profile
),
26406 T (Tag_ARM_ISA_use
),
26407 T (Tag_THUMB_ISA_use
),
26411 T (Tag_Advanced_SIMD_arch
),
26412 T (Tag_PCS_config
),
26413 T (Tag_ABI_PCS_R9_use
),
26414 T (Tag_ABI_PCS_RW_data
),
26415 T (Tag_ABI_PCS_RO_data
),
26416 T (Tag_ABI_PCS_GOT_use
),
26417 T (Tag_ABI_PCS_wchar_t
),
26418 T (Tag_ABI_FP_rounding
),
26419 T (Tag_ABI_FP_denormal
),
26420 T (Tag_ABI_FP_exceptions
),
26421 T (Tag_ABI_FP_user_exceptions
),
26422 T (Tag_ABI_FP_number_model
),
26423 T (Tag_ABI_align_needed
),
26424 T (Tag_ABI_align8_needed
),
26425 T (Tag_ABI_align_preserved
),
26426 T (Tag_ABI_align8_preserved
),
26427 T (Tag_ABI_enum_size
),
26428 T (Tag_ABI_HardFP_use
),
26429 T (Tag_ABI_VFP_args
),
26430 T (Tag_ABI_WMMX_args
),
26431 T (Tag_ABI_optimization_goals
),
26432 T (Tag_ABI_FP_optimization_goals
),
26433 T (Tag_compatibility
),
26434 T (Tag_CPU_unaligned_access
),
26435 T (Tag_FP_HP_extension
),
26436 T (Tag_VFP_HP_extension
),
26437 T (Tag_ABI_FP_16bit_format
),
26438 T (Tag_MPextension_use
),
26440 T (Tag_nodefaults
),
26441 T (Tag_also_compatible_with
),
26442 T (Tag_conformance
),
26444 T (Tag_Virtualization_use
),
26445 /* We deliberately do not include Tag_MPextension_use_legacy. */
/* Linear scan; the table is small enough that this is fine.  */
26453 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26454 if (streq (name
, attribute_table
[i
].name
))
26455 return attribute_table
[i
].tag
;
26461 /* Apply sym value for relocations only in the case that they are for
26462 local symbols in the same segment as the fixup and you have the
26463 respective architectural feature for blx and simple switches. */
/* Predicate consulted when resolving a fixup: symbol values may be
   folded in only for v5T-capable targets, same-segment symbols, and
   non-forced relocs; the switch then accepts the branch/call reloc
   types whose target function kind matches (ARM_IS_FUNC /
   THUMB_IS_FUNC).  The default case and returns are among the lines
   elided by this extraction.  */
26465 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26468 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26469 /* PR 17444: If the local symbol is in a different section then a reloc
26470 will always be generated for it, so applying the symbol value now
26471 will result in a double offset being stored in the relocation. */
26472 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26473 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26475 switch (fixP
->fx_r_type
)
26477 case BFD_RELOC_ARM_PCREL_BLX
:
26478 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26479 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26483 case BFD_RELOC_ARM_PCREL_CALL
:
26484 case BFD_RELOC_THUMB_PCREL_BLX
:
26485 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26496 #endif /* OBJ_ELF */