A series of fixes to address problems detected by compiling the assembler with address...
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Symbol marking the start of the function being unwound
     (NOTE(review): presumably set by the .fnstart handler elsewhere
     in this file -- confirm).  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind index table,
     if one has been created.  */
  symbolS * table_entry;
  /* Personality routine symbol, if one was named explicitly.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine; meaning defined by
     the ARM EHABI.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes used / allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions. */
84
85 typedef enum
86 {
87 PARSE_OPERAND_SUCCESS,
88 PARSE_OPERAND_FAIL,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result;
91
92 enum arm_float_abi
93 {
94 ARM_FLOAT_ABI_HARD,
95 ARM_FLOAT_ABI_SOFTFP,
96 ARM_FLOAT_ABI_SOFT
97 };
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 #ifdef OBJ_ELF
306 static const arm_feature_set fpu_vfp_fp16 =
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
308 static const arm_feature_set fpu_neon_ext_fma =
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
310 #endif
311 static const arm_feature_set fpu_vfp_ext_fma =
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
313 static const arm_feature_set fpu_vfp_ext_armv8 =
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
315 static const arm_feature_set fpu_vfp_ext_armv8xd =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
317 static const arm_feature_set fpu_neon_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
319 static const arm_feature_set fpu_crypto_ext_armv8 =
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
321 static const arm_feature_set crc_ext_armv8 =
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
323 static const arm_feature_set fpu_neon_ext_v8_1 =
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
325 static const arm_feature_set fpu_neon_ext_dotprod =
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
327
328 static int mfloat_abi_opt = -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
330 directive. */
331 static arm_feature_set selected_arch = ARM_ARCH_NONE;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
333 directive. */
334 static arm_feature_set selected_ext = ARM_ARCH_NONE;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
337 directive. */
338 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu = FPU_NONE;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name[20];
345
346 extern FLONUM_TYPE generic_floating_point_number;
347
348 /* Return if no cpu was selected on command-line. */
349 static bfd_boolean
350 no_cpu_selected (void)
351 {
352 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
353 }
354
355 #ifdef OBJ_ELF
356 # ifdef EABI_DEFAULT
357 static int meabi_flags = EABI_DEFAULT;
358 # else
359 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
360 # endif
361
362 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
363
364 bfd_boolean
365 arm_is_eabi (void)
366 {
367 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
368 }
369 #endif
370
371 #ifdef OBJ_ELF
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS * GOT_symbol;
374 #endif
375
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
379 instructions. */
380 static int thumb_mode = 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
385
386 /* Specifies the intrinsic IT insn behavior mode. */
387 enum implicit_it_mode
388 {
389 IMPLICIT_IT_MODE_NEVER = 0x00,
390 IMPLICIT_IT_MODE_ARM = 0x01,
391 IMPLICIT_IT_MODE_THUMB = 0x02,
392 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
393 };
394 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
395
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
398
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
403 there.)
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
406 machine code.
407
408 Important differences from the old Thumb mode:
409
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
418
419 static bfd_boolean unified_syntax = FALSE;
420
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars[] = "#[]{}";
426
427 enum neon_el_type
428 {
429 NT_invtype,
430 NT_untyped,
431 NT_integer,
432 NT_float,
433 NT_poly,
434 NT_signed,
435 NT_unsigned
436 };
437
438 struct neon_type_el
439 {
440 enum neon_el_type type;
441 unsigned size;
442 };
443
444 #define NEON_MAX_TYPE_ELS 4
445
446 struct neon_type
447 {
448 struct neon_type_el el[NEON_MAX_TYPE_ELS];
449 unsigned elems;
450 };
451
452 enum it_instruction_type
453 {
454 OUTSIDE_IT_INSN,
455 INSIDE_IT_INSN,
456 INSIDE_IT_LAST_INSN,
457 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
458 if inside, should be the last one. */
459 NEUTRAL_IT_INSN, /* This could be either inside or outside,
460 i.e. BKPT and NOP. */
461 IT_INSN /* The IT insn has been parsed. */
462 };
463
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
467
468 struct arm_it
469 {
470 const char * error;
471 unsigned long instruction;
472 int size;
473 int size_req;
474 int cond;
475 /* "uncond_value" is set to the value in place of the conditional field in
476 unconditional versions of the instruction, or -1 if nothing is
477 appropriate. */
478 int uncond_value;
479 struct neon_type vectype;
480 /* This does not indicate an actual NEON instruction, only that
481 the mnemonic accepts neon-style type suffixes. */
482 int is_neon;
483 /* Set to the opcode if the instruction needs relaxation.
484 Zero if the instruction is not relaxed. */
485 unsigned long relax;
486 struct
487 {
488 bfd_reloc_code_real_type type;
489 expressionS exp;
490 int pc_rel;
491 } relocs[ARM_IT_MAX_RELOCS];
492
493 enum it_instruction_type it_insn_type;
494
495 struct
496 {
497 unsigned reg;
498 signed int imm;
499 struct neon_type_el vectype;
500 unsigned present : 1; /* Operand present. */
501 unsigned isreg : 1; /* Operand was a register. */
502 unsigned immisreg : 1; /* .imm field is a second register. */
503 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
504 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
505 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
506 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
507 instructions. This allows us to disambiguate ARM <-> vector insns. */
508 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
509 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
510 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
511 unsigned issingle : 1; /* Operand is VFP single-precision register. */
512 unsigned hasreloc : 1; /* Operand has relocation suffix. */
513 unsigned writeback : 1; /* Operand has trailing ! */
514 unsigned preind : 1; /* Preindexed address. */
515 unsigned postind : 1; /* Postindexed address. */
516 unsigned negative : 1; /* Index register was negated. */
517 unsigned shifted : 1; /* Shift applied to operation. */
518 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
519 } operands[ARM_IT_MAX_OPERANDS];
520 };
521
522 static struct arm_it inst;
523
524 #define NUM_FLOAT_VALS 8
525
/* Textual forms of the floating-point constants that can be encoded
   directly in instructions; the trailing 0 is a NULL sentinel
   terminating the list.  NUM_FLOAT_VALS must match the number of
   string entries (currently 8).  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};
530
531 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
532
533 #define FAIL (-1)
534 #define SUCCESS (0)
535
536 #define SUFF_S 1
537 #define SUFF_D 2
538 #define SUFF_E 3
539 #define SUFF_P 4
540
541 #define CP_T_X 0x00008000
542 #define CP_T_Y 0x00400000
543
544 #define CONDS_BIT 0x00100000
545 #define LOAD_BIT 0x00100000
546
547 #define DOUBLE_LOAD_FLAG 0x00000001
548
549 struct asm_cond
550 {
551 const char * template_name;
552 unsigned long value;
553 };
554
555 #define COND_ALWAYS 0xE
556
557 struct asm_psr
558 {
559 const char * template_name;
560 unsigned long field;
561 };
562
563 struct asm_barrier_opt
564 {
565 const char * template_name;
566 unsigned long value;
567 const arm_feature_set arch;
568 };
569
570 /* The bit that distinguishes CPSR and SPSR. */
571 #define SPSR_BIT (1 << 22)
572
573 /* The individual PSR flag bits. */
574 #define PSR_c (1 << 16)
575 #define PSR_x (1 << 17)
576 #define PSR_s (1 << 18)
577 #define PSR_f (1 << 19)
578
579 struct reloc_entry
580 {
581 const char * name;
582 bfd_reloc_code_real_type reloc;
583 };
584
585 enum vfp_reg_pos
586 {
587 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
588 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
589 };
590
591 enum vfp_ldstm_type
592 {
593 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
594 };
595
596 /* Bits for DEFINED field in neon_typed_alias. */
597 #define NTA_HASTYPE 1
598 #define NTA_HASINDEX 2
599
600 struct neon_typed_alias
601 {
602 unsigned char defined;
603 unsigned char index;
604 struct neon_type_el eltype;
605 };
606
607 /* ARM register categories. This includes coprocessor numbers and various
608 architecture extensions' registers. Each entry should have an error message
609 in reg_expected_msgs below. */
610 enum arm_reg_type
611 {
612 REG_TYPE_RN,
613 REG_TYPE_CP,
614 REG_TYPE_CN,
615 REG_TYPE_FN,
616 REG_TYPE_VFS,
617 REG_TYPE_VFD,
618 REG_TYPE_NQ,
619 REG_TYPE_VFSD,
620 REG_TYPE_NDQ,
621 REG_TYPE_NSD,
622 REG_TYPE_NSDQ,
623 REG_TYPE_VFC,
624 REG_TYPE_MVF,
625 REG_TYPE_MVD,
626 REG_TYPE_MVFX,
627 REG_TYPE_MVDX,
628 REG_TYPE_MVAX,
629 REG_TYPE_DSPSC,
630 REG_TYPE_MMXWR,
631 REG_TYPE_MMXWC,
632 REG_TYPE_MMXWCG,
633 REG_TYPE_XSCALE,
634 REG_TYPE_RNB
635 };
636
637 /* Structure for a hash table entry for a register.
638 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
639 information which states whether a vector type or index is specified (for a
640 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
641 struct reg_entry
642 {
643 const char * name;
644 unsigned int number;
645 unsigned char type;
646 unsigned char builtin;
647 struct neon_typed_alias * neon;
648 };
649
650 /* Diagnostics used when we don't get a register of the expected type. */
651 const char * const reg_expected_msgs[] =
652 {
653 [REG_TYPE_RN] = N_("ARM register expected"),
654 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
655 [REG_TYPE_CN] = N_("co-processor register expected"),
656 [REG_TYPE_FN] = N_("FPA register expected"),
657 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
658 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
659 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
660 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
661 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
662 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
663 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
664 " expected"),
665 [REG_TYPE_VFC] = N_("VFP system register expected"),
666 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
667 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
668 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
669 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
670 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
671 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
672 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
673 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
674 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
675 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
676 [REG_TYPE_RNB] = N_("")
677 };
678
679 /* Some well known registers that we refer to directly elsewhere. */
680 #define REG_R12 12
681 #define REG_SP 13
682 #define REG_LR 14
683 #define REG_PC 15
684
685 /* ARM instructions take 4bytes in the object file, Thumb instructions
686 take 2: */
687 #define INSN_SIZE 4
688
689 struct asm_opcode
690 {
691 /* Basic string to match. */
692 const char * template_name;
693
694 /* Parameters to instruction. */
695 unsigned int operands[8];
696
697 /* Conditional tag - see opcode_lookup. */
698 unsigned int tag : 4;
699
700 /* Basic instruction code. */
701 unsigned int avalue : 28;
702
703 /* Thumb-format instruction code. */
704 unsigned int tvalue;
705
706 /* Which architecture variant provides this instruction. */
707 const arm_feature_set * avariant;
708 const arm_feature_set * tvariant;
709
710 /* Function to call to encode instruction in ARM format. */
711 void (* aencode) (void);
712
713 /* Function to call to encode instruction in Thumb format. */
714 void (* tencode) (void);
715 };
716
717 /* Defines for various bits that we will want to toggle. */
718 #define INST_IMMEDIATE 0x02000000
719 #define OFFSET_REG 0x02000000
720 #define HWOFFSET_IMM 0x00400000
721 #define SHIFT_BY_REG 0x00000010
722 #define PRE_INDEX 0x01000000
723 #define INDEX_UP 0x00800000
724 #define WRITE_BACK 0x00200000
725 #define LDM_TYPE_2_OR_3 0x00400000
726 #define CPSI_MMOD 0x00020000
727
728 #define LITERAL_MASK 0xf000f000
729 #define OPCODE_MASK 0xfe1fffff
730 #define V4_STR_BIT 0x00000020
731 #define VLDR_VMOV_SAME 0x0040f000
732
733 #define T2_SUBS_PC_LR 0xf3de8f00
734
735 #define DATA_OP_SHIFT 21
736 #define SBIT_SHIFT 20
737
738 #define T2_OPCODE_MASK 0xfe1fffff
739 #define T2_DATA_OP_SHIFT 21
740 #define T2_SBIT_SHIFT 20
741
742 #define A_COND_MASK 0xf0000000
743 #define A_PUSH_POP_OP_MASK 0x0fff0000
744
/* Opcodes for pushing/popping registers to/from the stack.  */
746 #define A1_OPCODE_PUSH 0x092d0000
747 #define A2_OPCODE_PUSH 0x052d0004
748 #define A2_OPCODE_POP 0x049d0004
749
750 /* Codes to distinguish the arithmetic instructions. */
751 #define OPCODE_AND 0
752 #define OPCODE_EOR 1
753 #define OPCODE_SUB 2
754 #define OPCODE_RSB 3
755 #define OPCODE_ADD 4
756 #define OPCODE_ADC 5
757 #define OPCODE_SBC 6
758 #define OPCODE_RSC 7
759 #define OPCODE_TST 8
760 #define OPCODE_TEQ 9
761 #define OPCODE_CMP 10
762 #define OPCODE_CMN 11
763 #define OPCODE_ORR 12
764 #define OPCODE_MOV 13
765 #define OPCODE_BIC 14
766 #define OPCODE_MVN 15
767
768 #define T2_OPCODE_AND 0
769 #define T2_OPCODE_BIC 1
770 #define T2_OPCODE_ORR 2
771 #define T2_OPCODE_ORN 3
772 #define T2_OPCODE_EOR 4
773 #define T2_OPCODE_ADD 8
774 #define T2_OPCODE_ADC 10
775 #define T2_OPCODE_SBC 11
776 #define T2_OPCODE_SUB 13
777 #define T2_OPCODE_RSB 14
778
779 #define T_OPCODE_MUL 0x4340
780 #define T_OPCODE_TST 0x4200
781 #define T_OPCODE_CMN 0x42c0
782 #define T_OPCODE_NEG 0x4240
783 #define T_OPCODE_MVN 0x43c0
784
785 #define T_OPCODE_ADD_R3 0x1800
786 #define T_OPCODE_SUB_R3 0x1a00
787 #define T_OPCODE_ADD_HI 0x4400
788 #define T_OPCODE_ADD_ST 0xb000
789 #define T_OPCODE_SUB_ST 0xb080
790 #define T_OPCODE_ADD_SP 0xa800
791 #define T_OPCODE_ADD_PC 0xa000
792 #define T_OPCODE_ADD_I8 0x3000
793 #define T_OPCODE_SUB_I8 0x3800
794 #define T_OPCODE_ADD_I3 0x1c00
795 #define T_OPCODE_SUB_I3 0x1e00
796
797 #define T_OPCODE_ASR_R 0x4100
798 #define T_OPCODE_LSL_R 0x4080
799 #define T_OPCODE_LSR_R 0x40c0
800 #define T_OPCODE_ROR_R 0x41c0
801 #define T_OPCODE_ASR_I 0x1000
802 #define T_OPCODE_LSL_I 0x0000
803 #define T_OPCODE_LSR_I 0x0800
804
805 #define T_OPCODE_MOV_I8 0x2000
806 #define T_OPCODE_CMP_I8 0x2800
807 #define T_OPCODE_CMP_LR 0x4280
808 #define T_OPCODE_MOV_HR 0x4600
809 #define T_OPCODE_CMP_HR 0x4500
810
811 #define T_OPCODE_LDR_PC 0x4800
812 #define T_OPCODE_LDR_SP 0x9800
813 #define T_OPCODE_STR_SP 0x9000
814 #define T_OPCODE_LDR_IW 0x6800
815 #define T_OPCODE_STR_IW 0x6000
816 #define T_OPCODE_LDR_IH 0x8800
817 #define T_OPCODE_STR_IH 0x8000
818 #define T_OPCODE_LDR_IB 0x7800
819 #define T_OPCODE_STR_IB 0x7000
820 #define T_OPCODE_LDR_RW 0x5800
821 #define T_OPCODE_STR_RW 0x5000
822 #define T_OPCODE_LDR_RH 0x5a00
823 #define T_OPCODE_STR_RH 0x5200
824 #define T_OPCODE_LDR_RB 0x5c00
825 #define T_OPCODE_STR_RB 0x5400
826
827 #define T_OPCODE_PUSH 0xb400
828 #define T_OPCODE_POP 0xbc00
829
830 #define T_OPCODE_BRANCH 0xe000
831
832 #define THUMB_SIZE 2 /* Size of thumb instruction. */
833 #define THUMB_PP_PC_LR 0x0100
834 #define THUMB_LOAD_BIT 0x0800
835 #define THUMB2_LOAD_BIT 0x00100000
836
837 #define BAD_ARGS _("bad arguments to instruction")
838 #define BAD_SP _("r13 not allowed here")
839 #define BAD_PC _("r15 not allowed here")
840 #define BAD_COND _("instruction cannot be conditional")
841 #define BAD_OVERLAP _("registers may not be the same")
842 #define BAD_HIREG _("lo register required")
843 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: like the other BAD_* diagnostics above, this
   macro must expand to a plain expression so it can be used both as a
   statement (with the caller's own semicolon) and inside expressions
   such as conditionals.  The stray ';' previously embedded here made
   expression-context uses a syntax error.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
845 #define BAD_BRANCH _("branch must be last instruction in IT block")
846 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
847 #define BAD_NOT_IT _("instruction not allowed in IT block")
848 #define BAD_FPU _("selected FPU does not support instruction")
849 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
850 #define BAD_IT_COND _("incorrect condition in IT block")
851 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
852 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
853 #define BAD_PC_ADDRESSING \
854 _("cannot use register index with PC-relative addressing")
855 #define BAD_PC_WRITEBACK \
856 _("cannot use writeback with PC-relative addressing")
857 #define BAD_RANGE _("branch out of range")
858 #define BAD_FP16 _("selected processor does not support fp16 instruction")
859 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
860 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
861
/* Hash tables used while parsing, keyed by the various mnemonic and
   name spaces.  (Their initialisation is outside this chunk.)  */
static struct hash_control * arm_ops_hsh;	 /* Instruction mnemonics.  */
static struct hash_control * arm_cond_hsh;	 /* Condition-code names.  */
static struct hash_control * arm_shift_hsh;	 /* Shift-operator names.  */
static struct hash_control * arm_psr_hsh;	 /* PSR field names.  */
static struct hash_control * arm_v7m_psr_hsh;	 /* v7-M special-register names.  */
static struct hash_control * arm_reg_hsh;	 /* Register names (see arm_reg_parse_multi).  */
static struct hash_control * arm_reloc_hsh;	 /* Relocation-specifier names.  */
static struct hash_control * arm_barrier_opt_hsh; /* Barrier-option names.  */
870
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:	<insn>
   may differ from:
     ...
     label:
		<insn>  */

/* The most recently seen label; used to resolve the ambiguity above.  */
symbolS *  last_label_seen;
/* Non-zero when the next label encountered should be treated as naming
   a Thumb function.  */
static int label_is_thumb_function_name = FALSE;
882
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The queued literal expressions.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int	         next_free_entry;
  /* Numeric identifier of this pool — presumably used to name SYMBOL;
     confirm against the pool-creation code (outside this chunk).  */
  unsigned int	         id;
  /* Symbol marking where this pool will be emitted.  */
  symbolS *	         symbol;
  /* Section and subsection this pool belongs to.  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source location of each literal, for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the global LIST_OF_POOLS chain.  */
  struct literal_pool *  next;
  /* Required alignment for the pool.  */
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
904
/* State machine for the .asmfunc / .endasmfunc directive pair.
   (State meanings inferred from the enumerator names; the directive
   handlers are outside this chunk.)  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	  /* Not within an .asmfunc region.  */
  WAITING_ASMFUNC_NAME,	  /* Seen .asmfunc, awaiting the function name.  */
  WAITING_ENDASMFUNC	  /* Inside the body, awaiting .endasmfunc.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
913
/* The IT (If-Then) block state of the instruction stream currently
   being assembled.  For ELF it lives in the per-segment info, so each
   section carries its own IT context; otherwise a single global is
   used.  */
#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
919
920 static inline int
921 now_it_compatible (int cond)
922 {
923 return (cond & ~1) == (now_it.cc & ~1);
924 }
925
926 static inline int
927 conditional_insn (void)
928 {
929 return inst.cond != COND_ALWAYS;
930 }
931
/* Forward declarations for the IT (If-Then) block state machine; the
   definitions appear later in this file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
939
/* Record TYPE as the IT-block role of the current instruction and run
   the IT state machine; returns from the calling (void) function when
   handle_it_state reports failure.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for use in functions that return a value:
   returns FAILRET on failure.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as required to be last in an IT block.
   Unconditional instructions are given IF_INSIDE_IT_LAST_INSN,
   conditional ones INSIDE_IT_LAST_INSN.  */
#define set_it_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_it_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)
967
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Characters that separate multiple statements on one source line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: skips at most ONE space character.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
1002
1003 static inline int
1004 skip_past_char (char ** str, char c)
1005 {
1006 /* PR gas/14987: Allow for whitespace before the expected character. */
1007 skip_whitespace (*str);
1008
1009 if (**str == c)
1010 {
1011 (*str)++;
1012 return SUCCESS;
1013 }
1014 else
1015 return FAIL;
1016 }
1017
/* Consume an optional comma (preceded by at most one space); yields
   SUCCESS or FAIL like skip_past_char.  */
#define skip_past_comma(str) skip_past_char (str, ',')
1019
1020 /* Arithmetic expressions (possibly involving symbols). */
1021
1022 /* Return TRUE if anything in the expression is a bignum. */
1023
1024 static bfd_boolean
1025 walk_no_bignums (symbolS * sp)
1026 {
1027 if (symbol_get_value_expression (sp)->X_op == O_big)
1028 return TRUE;
1029
1030 if (symbol_get_value_expression (sp)->X_add_symbol)
1031 {
1032 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1033 || (symbol_get_value_expression (sp)->X_op_symbol
1034 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1035 }
1036
1037 return FALSE;
1038 }
1039
/* TRUE while expression() is running on behalf of my_get_expression, so
   that md_operand knows to mark parse failures as O_illegal.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX  0		/* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1		/* A '#' or '$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* The prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1049
/* Parse an expression from *STR into *EP, honouring the immediate
   prefix rules selected by PREFIX_MODE (one of the GE_* values above).
   On success returns SUCCESS with *STR advanced past the expression.
   On failure sets inst.error (if not already set) and returns non-zero.
   NOTE(review): the failure paths mix `return FAIL' and `return 1';
   callers appear to rely only on zero/non-zero — confirm before
   unifying.  Bignums are rejected unless GE_OPT_PREFIX_BIG.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR.  The flag tells
     md_operand to flag bad expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1119
1120 /* Turn a string in input_line_pointer into a floating point constant
1121 of type TYPE, and store the appropriate bytes in *LITP. The number
1122 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1123 returned, or NULL on OK.
1124
1125 Note that fp constants aren't represent in the normal way on the ARM.
1126 In big endian mode, things are as expected. However, in little endian
1127 mode fp constants are big-endian word-wise, and little-endian byte-wise
1128 within the words. For example, (double) 1.1 in big endian mode is
1129 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1130 the byte sequence 99 99 f1 3f 9a 99 99 99.
1131
1132 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1133
1134 const char *
1135 md_atof (int type, char * litP, int * sizeP)
1136 {
1137 int prec;
1138 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1139 char *t;
1140 int i;
1141
1142 switch (type)
1143 {
1144 case 'f':
1145 case 'F':
1146 case 's':
1147 case 'S':
1148 prec = 2;
1149 break;
1150
1151 case 'd':
1152 case 'D':
1153 case 'r':
1154 case 'R':
1155 prec = 4;
1156 break;
1157
1158 case 'x':
1159 case 'X':
1160 prec = 5;
1161 break;
1162
1163 case 'p':
1164 case 'P':
1165 prec = 5;
1166 break;
1167
1168 default:
1169 *sizeP = 0;
1170 return _("Unrecognized or unsupported floating point constant");
1171 }
1172
1173 t = atof_ieee (input_line_pointer, type, words);
1174 if (t)
1175 input_line_pointer = t;
1176 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1177
1178 if (target_big_endian)
1179 {
1180 for (i = 0; i < prec; i++)
1181 {
1182 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1183 litP += sizeof (LITTLENUM_TYPE);
1184 }
1185 }
1186 else
1187 {
1188 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1189 for (i = prec - 1; i >= 0; i--)
1190 {
1191 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1192 litP += sizeof (LITTLENUM_TYPE);
1193 }
1194 else
1195 /* For a 4 byte float the order of elements in `words' is 1 0.
1196 For an 8 byte float the order is 1 0 3 2. */
1197 for (i = 0; i < prec; i += 2)
1198 {
1199 md_number_to_chars (litP, (valueT) words[i + 1],
1200 sizeof (LITTLENUM_TYPE));
1201 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1202 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1203 litP += 2 * sizeof (LITTLENUM_TYPE);
1204 }
1205 }
1206
1207 return NULL;
1208 }
1209
1210 /* We handle all bad expressions here, so that we can report the faulty
1211 instruction in the error message. */
1212
1213 void
1214 md_operand (expressionS * exp)
1215 {
1216 if (in_my_get_expression)
1217 exp->X_op = O_illegal;
1218 }
1219
1220 /* Immediate values. */
1221
#ifdef OBJ_ELF
/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;

  exp.X_op = O_illegal;

  /* An immediate must begin with '#' or '$'; without one, EXP stays
     O_illegal and we fall into the error path below.  */
  if (is_immediate_prefix (*input_line_pointer))
    {
      ++input_line_pointer;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
#endif
1249
1250 /* Register parsing. */
1251
1252 /* Generic register parser. CCP points to what should be the
1253 beginning of a register name. If it is indeed a valid register
1254 name, advance CCP over it and return the reg_entry structure;
1255 otherwise return NULL. Does not issue diagnostics. */
1256
1257 static struct reg_entry *
1258 arm_reg_parse_multi (char **ccp)
1259 {
1260 char *start = *ccp;
1261 char *p;
1262 struct reg_entry *reg;
1263
1264 skip_whitespace (start);
1265
1266 #ifdef REGISTER_PREFIX
1267 if (*start != REGISTER_PREFIX)
1268 return NULL;
1269 start++;
1270 #endif
1271 #ifdef OPTIONAL_REGISTER_PREFIX
1272 if (*start == OPTIONAL_REGISTER_PREFIX)
1273 start++;
1274 #endif
1275
1276 p = start;
1277 if (!ISALPHA (*p) || !is_name_beginner (*p))
1278 return NULL;
1279
1280 do
1281 p++;
1282 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1283
1284 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1285
1286 if (!reg)
1287 return NULL;
1288
1289 *ccp = p;
1290 return reg;
1291 }
1292
1293 static int
1294 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1295 enum arm_reg_type type)
1296 {
1297 /* Alternative syntaxes are accepted for a few register classes. */
1298 switch (type)
1299 {
1300 case REG_TYPE_MVF:
1301 case REG_TYPE_MVD:
1302 case REG_TYPE_MVFX:
1303 case REG_TYPE_MVDX:
1304 /* Generic coprocessor register names are allowed for these. */
1305 if (reg && reg->type == REG_TYPE_CN)
1306 return reg->number;
1307 break;
1308
1309 case REG_TYPE_CP:
1310 /* For backward compatibility, a bare number is valid here. */
1311 {
1312 unsigned long processor = strtoul (start, ccp, 10);
1313 if (*ccp != start && processor <= 15)
1314 return processor;
1315 }
1316 /* Fall through. */
1317
1318 case REG_TYPE_MMXWC:
1319 /* WC includes WCG. ??? I'm not sure this is true for all
1320 instructions that take WC registers. */
1321 if (reg && reg->type == REG_TYPE_MMXWCG)
1322 return reg->number;
1323 break;
1324
1325 default:
1326 break;
1327 }
1328
1329 return FAIL;
1330 }
1331
1332 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1333 return value is the register number or FAIL. */
1334
1335 static int
1336 arm_reg_parse (char **ccp, enum arm_reg_type type)
1337 {
1338 char *start = *ccp;
1339 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1340 int ret;
1341
1342 /* Do not allow a scalar (reg+index) to parse as a register. */
1343 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1344 return FAIL;
1345
1346 if (reg && reg->type == type)
1347 return reg->number;
1348
1349 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1350 return ret;
1351
1352 *ccp = start;
1353 return FAIL;
1354 }
1355
1356 /* Parse a Neon type specifier. *STR should point at the leading '.'
1357 character. Does no verification at this stage that the type fits the opcode
1358 properly. E.g.,
1359
1360 .i32.i32.s16
1361 .s32.f32
1362 .u16
1363
1364 Can all be legally parsed by this function.
1365
1366 Fills in neon_type struct pointer with parsed information, and updates STR
1367 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1368 type, FAIL if not. */
1369
1370 static int
1371 parse_neon_type (struct neon_type *type, char **str)
1372 {
1373 char *ptr = *str;
1374
1375 if (type)
1376 type->elems = 0;
1377
1378 while (type->elems < NEON_MAX_TYPE_ELS)
1379 {
1380 enum neon_el_type thistype = NT_untyped;
1381 unsigned thissize = -1u;
1382
1383 if (*ptr != '.')
1384 break;
1385
1386 ptr++;
1387
1388 /* Just a size without an explicit type. */
1389 if (ISDIGIT (*ptr))
1390 goto parsesize;
1391
1392 switch (TOLOWER (*ptr))
1393 {
1394 case 'i': thistype = NT_integer; break;
1395 case 'f': thistype = NT_float; break;
1396 case 'p': thistype = NT_poly; break;
1397 case 's': thistype = NT_signed; break;
1398 case 'u': thistype = NT_unsigned; break;
1399 case 'd':
1400 thistype = NT_float;
1401 thissize = 64;
1402 ptr++;
1403 goto done;
1404 default:
1405 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1406 return FAIL;
1407 }
1408
1409 ptr++;
1410
1411 /* .f is an abbreviation for .f32. */
1412 if (thistype == NT_float && !ISDIGIT (*ptr))
1413 thissize = 32;
1414 else
1415 {
1416 parsesize:
1417 thissize = strtoul (ptr, &ptr, 10);
1418
1419 if (thissize != 8 && thissize != 16 && thissize != 32
1420 && thissize != 64)
1421 {
1422 as_bad (_("bad size %d in type specifier"), thissize);
1423 return FAIL;
1424 }
1425 }
1426
1427 done:
1428 if (type)
1429 {
1430 type->el[type->elems].type = thistype;
1431 type->el[type->elems].size = thissize;
1432 type->elems++;
1433 }
1434 }
1435
1436 /* Empty/missing type is not a successful parse. */
1437 if (type->elems == 0)
1438 return FAIL;
1439
1440 *str = ptr;
1441
1442 return SUCCESS;
1443 }
1444
1445 /* Errors may be set multiple times during parsing or bit encoding
1446 (particularly in the Neon bits), but usually the earliest error which is set
1447 will be the most meaningful. Avoid overwriting it with later (cascading)
1448 errors by calling this function. */
1449
1450 static void
1451 first_error (const char *err)
1452 {
1453 if (!inst.error)
1454 inst.error = err;
1455 }
1456
1457 /* Parse a single type, e.g. ".s32", leading period included. */
1458 static int
1459 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1460 {
1461 char *str = *ccp;
1462 struct neon_type optype;
1463
1464 if (*str == '.')
1465 {
1466 if (parse_neon_type (&optype, &str) == SUCCESS)
1467 {
1468 if (optype.elems == 1)
1469 *vectype = optype.el[0];
1470 else
1471 {
1472 first_error (_("only one type should be specified for operand"));
1473 return FAIL;
1474 }
1475 }
1476 else
1477 {
1478 first_error (_("vector type expected"));
1479 return FAIL;
1480 }
1481 }
1482 else
1483 return FAIL;
1484
1485 *ccp = str;
1486
1487 return SUCCESS;
1488 }
1489
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]" — operate on all lanes.  */
#define NEON_INTERLEAVE_LANES	14	/* Dn-Dm range lists (see below).  */
1495
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL on error.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an "undefined" typed alias; filled in as we parse.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index already attached to the register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit type suffix (e.g. ".s32") may not conflict with a type
     the alias already carries.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]", or "[]" meaning all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1608
1609 /* Like arm_reg_parse, but also allow the following extra features:
1610 - If RTYPE is non-zero, return the (possibly restricted) type of the
1611 register (e.g. Neon double or quad reg when either has been requested).
1612 - If this is a Neon vector type with additional type information, fill
1613 in the struct pointed to by VECTYPE (if non-NULL).
1614 This function will fault on encountering a scalar. */
1615
1616 static int
1617 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1618 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1619 {
1620 struct neon_typed_alias atype;
1621 char *str = *ccp;
1622 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1623
1624 if (reg == FAIL)
1625 return FAIL;
1626
1627 /* Do not allow regname(... to parse as a register. */
1628 if (*str == '(')
1629 return FAIL;
1630
1631 /* Do not allow a scalar (reg+index) to parse as a register. */
1632 if ((atype.defined & NTA_HASINDEX) != 0)
1633 {
1634 first_error (_("register operand expected, but got scalar"));
1635 return FAIL;
1636 }
1637
1638 if (vectype)
1639 *vectype = atype.eltype;
1640
1641 *ccp = str;
1642
1643 return reg;
1644 }
1645
/* A scalar is encoded by parse_scalar as reg * 16 + index; these
   macros recover the two fields.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1648
1649 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1650 have enough information to be able to do a good job bounds-checking. So, we
1651 just do easy checks here, and do further checks later. */
1652
1653 static int
1654 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1655 {
1656 int reg;
1657 char *str = *ccp;
1658 struct neon_typed_alias atype;
1659 enum arm_reg_type reg_type = REG_TYPE_VFD;
1660
1661 if (elsize == 4)
1662 reg_type = REG_TYPE_VFS;
1663
1664 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1665
1666 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1667 return FAIL;
1668
1669 if (atype.index == NEON_ALL_LANES)
1670 {
1671 first_error (_("scalar must have an index"));
1672 return FAIL;
1673 }
1674 else if (atype.index >= 64 / elsize)
1675 {
1676 first_error (_("scalar index out of range"));
1677 return FAIL;
1678 }
1679
1680 if (type)
1681 *type = atype.eltype;
1682
1683 *ccp = str;
1684
1685 return reg * 16 + atype.index;
1686 }
1687
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core registers (or a bare constant mask).  */
  REGLIST_CLRM,		/* CLRM list: r0-r12, lr and APSR.  */
  REGLIST_VFP_S,	/* VFP single-precision registers.  */
  REGLIST_VFP_S_VPR,	/* As above, with VPR expected last.  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_VFP_D_VPR,	/* As above, with VPR expected last.  */
  REGLIST_NEON_D	/* Neon D registers; Q regs denote pairs.  */
};
1700
1701 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1702
1703 static long
1704 parse_reg_list (char ** strp, enum reg_list_els etype)
1705 {
1706 char *str = *strp;
1707 long range = 0;
1708 int another_range;
1709
1710 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1711
1712 /* We come back here if we get ranges concatenated by '+' or '|'. */
1713 do
1714 {
1715 skip_whitespace (str);
1716
1717 another_range = 0;
1718
1719 if (*str == '{')
1720 {
1721 int in_range = 0;
1722 int cur_reg = -1;
1723
1724 str++;
1725 do
1726 {
1727 int reg;
1728 const char apsr_str[] = "apsr";
1729 int apsr_str_len = strlen (apsr_str);
1730
1731 reg = arm_reg_parse (&str, REGLIST_RN);
1732 if (etype == REGLIST_CLRM)
1733 {
1734 if (reg == REG_SP || reg == REG_PC)
1735 reg = FAIL;
1736 else if (reg == FAIL
1737 && !strncasecmp (str, apsr_str, apsr_str_len)
1738 && !ISALPHA (*(str + apsr_str_len)))
1739 {
1740 reg = 15;
1741 str += apsr_str_len;
1742 }
1743
1744 if (reg == FAIL)
1745 {
1746 first_error (_("r0-r12, lr or APSR expected"));
1747 return FAIL;
1748 }
1749 }
1750 else /* etype == REGLIST_RN. */
1751 {
1752 if (reg == FAIL)
1753 {
1754 first_error (_(reg_expected_msgs[REGLIST_RN]));
1755 return FAIL;
1756 }
1757 }
1758
1759 if (in_range)
1760 {
1761 int i;
1762
1763 if (reg <= cur_reg)
1764 {
1765 first_error (_("bad range in register list"));
1766 return FAIL;
1767 }
1768
1769 for (i = cur_reg + 1; i < reg; i++)
1770 {
1771 if (range & (1 << i))
1772 as_tsktsk
1773 (_("Warning: duplicated register (r%d) in register list"),
1774 i);
1775 else
1776 range |= 1 << i;
1777 }
1778 in_range = 0;
1779 }
1780
1781 if (range & (1 << reg))
1782 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1783 reg);
1784 else if (reg <= cur_reg)
1785 as_tsktsk (_("Warning: register range not in ascending order"));
1786
1787 range |= 1 << reg;
1788 cur_reg = reg;
1789 }
1790 while (skip_past_comma (&str) != FAIL
1791 || (in_range = 1, *str++ == '-'));
1792 str--;
1793
1794 if (skip_past_char (&str, '}') == FAIL)
1795 {
1796 first_error (_("missing `}'"));
1797 return FAIL;
1798 }
1799 }
1800 else if (etype == REGLIST_RN)
1801 {
1802 expressionS exp;
1803
1804 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1805 return FAIL;
1806
1807 if (exp.X_op == O_constant)
1808 {
1809 if (exp.X_add_number
1810 != (exp.X_add_number & 0x0000ffff))
1811 {
1812 inst.error = _("invalid register mask");
1813 return FAIL;
1814 }
1815
1816 if ((range & exp.X_add_number) != 0)
1817 {
1818 int regno = range & exp.X_add_number;
1819
1820 regno &= -regno;
1821 regno = (1 << regno) - 1;
1822 as_tsktsk
1823 (_("Warning: duplicated register (r%d) in register list"),
1824 regno);
1825 }
1826
1827 range |= exp.X_add_number;
1828 }
1829 else
1830 {
1831 if (inst.relocs[0].type != 0)
1832 {
1833 inst.error = _("expression too complex");
1834 return FAIL;
1835 }
1836
1837 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1838 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1839 inst.relocs[0].pc_rel = 0;
1840 }
1841 }
1842
1843 if (*str == '|' || *str == '+')
1844 {
1845 str++;
1846 another_range = 1;
1847 }
1848 }
1849 while (another_range);
1850
1851 *strp = str;
1852 return range;
1853 }
1854
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  /* TRUE once "vpr" has been consumed (only for the *_VPR variants).  */
  bfd_boolean vpr_seen = FALSE;
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Choose the register class and, for S registers, the list limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* "vpr" is accepted once, and only as the final element.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Accumulate every register in the range into the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this skips the expected closing '}' without checking
     that it is actually present — confirm that malformed input cannot
     reach this point with a different character.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2083
2084 /* True if two alias types are the same. */
2085
2086 static bfd_boolean
2087 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2088 {
2089 if (!a && !b)
2090 return TRUE;
2091
2092 if (!a || !b)
2093 return FALSE;
2094
2095 if (a->defined != b->defined)
2096 return FALSE;
2097
2098 if ((a->defined & NTA_HASTYPE) != 0
2099 && (a->eltype.type != b->eltype.type
2100 || a->eltype.size != b->eltype.size))
2101 return FALSE;
2102
2103 if ((a->defined & NTA_HASINDEX) != 0
2104 && (a->index != b->index))
2105 return FALSE;
2106
2107 return TRUE;
2108 }
2109
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the encoding described above.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2121
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The list may optionally be wrapped in braces; remember whether it
     was, since a bare single register needs no closing brace.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      /* Parse one D or Q register, possibly with a type suffix and/or
	 a [lane] index.  */
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First entry: it fixes the base register and the element
	     type every later entry must match.  Q registers always
	     have unit stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second entry: it determines the register stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent entries must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count in units of D registers; a Q register counts as two.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed entry in the list must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Cannot mix indexed and non-indexed entries.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length into the return value; decode with the
     NEON_LANE / NEON_REG_STRIDE / NEON_REGLIST_LENGTH macros above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2278
2279 /* Parse an explicit relocation suffix on an expression. This is
2280 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2281 arm_reloc_hsh contains no entries, so this function can only
2282 succeed if there is no () after the word. Returns -1 on error,
2283 BFD_RELOC_UNUSED if there wasn't any suffix. */
2284
2285 static int
2286 parse_reloc (char **str)
2287 {
2288 struct reloc_entry *r;
2289 char *p, *q;
2290
2291 if (**str != '(')
2292 return BFD_RELOC_UNUSED;
2293
2294 p = *str + 1;
2295 q = p;
2296
2297 while (*q && *q != ')' && *q != ',')
2298 q++;
2299 if (*q != ')')
2300 return -1;
2301
2302 if ((r = (struct reloc_entry *)
2303 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2304 return -1;
2305
2306 *str = q + 1;
2307 return r->reloc;
2308 }
2309
2310 /* Directives: register aliases. */
2311
2312 static struct reg_entry *
2313 insert_reg_alias (char *str, unsigned number, int type)
2314 {
2315 struct reg_entry *new_reg;
2316 const char *name;
2317
2318 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2319 {
2320 if (new_reg->builtin)
2321 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2322
2323 /* Only warn about a redefinition if it's not defined as the
2324 same register. */
2325 else if (new_reg->number != number || new_reg->type != type)
2326 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2327
2328 return NULL;
2329 }
2330
2331 name = xstrdup (str);
2332 new_reg = XNEW (struct reg_entry);
2333
2334 new_reg->name = name;
2335 new_reg->number = number;
2336 new_reg->type = type;
2337 new_reg->builtin = FALSE;
2338 new_reg->neon = NULL;
2339
2340 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2341 abort ();
2342
2343 return new_reg;
2344 }
2345
2346 static void
2347 insert_neon_reg_alias (char *str, int number, int type,
2348 struct neon_typed_alias *atype)
2349 {
2350 struct reg_entry *reg = insert_reg_alias (str, number, type);
2351
2352 if (!reg)
2353 {
2354 first_error (_("attempt to redefine typed alias"));
2355 return;
2356 }
2357
2358 if (atype)
2359 {
2360 reg->neon = XNEW (struct neon_typed_alias);
2361 *reg->neon = *atype;
2362 }
2363 }
2364
2365 /* Look for the .req directive. This is of the form:
2366
2367 new_register_name .req existing_register_name
2368
2369 If we find one, or if it looks sufficiently like one that we want to
2370 handle any error here, return TRUE. Otherwise return FALSE. */
2371
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* Return TRUE anyway: this was a .req statement, even though it
	 could not be honoured.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NBUF is a local working copy; insert_reg_alias makes its own copy
     of the name, so NBUF is freed before returning.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2444
2445 /* Create a Neon typed/indexed register alias using directives, e.g.:
2446 X .dn d5.s32[1]
2447 Y .qn 6.s16
2448 Z .dn d7
2449 T .dn Z[0]
2450 These typed registers can be used instead of the types specified after the
2451 Neon mnemonic, so long as all operands given have types. Types can also be
2452 specified directly, e.g.:
2453 vadd d0.s32, d1.s32, d2.s32 */
2454
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* The directive selects the base register width: .dn for D (VFD)
     registers, .qn for Q registers.  Anything else is not ours.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Register numbers are in D-register units, so a bare number
	 naming a Q register is doubled.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* An alias of an existing typed alias inherits its type info.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  /* Working copy of the name; insert_neon_reg_alias copies what it
     keeps, so NAMEBUF is freed before returning.  */
  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2593
2594 /* Should never be called, as .req goes between the alias and the
2595 register name, not at the beginning of the line. */
2596
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is handled by create_register_alias when it appears after the
     alias name; reaching this handler means it led the line.  */
  as_bad (_("invalid syntax for .req directive"));
}
2602
/* Like s_req: .dn must follow the alias name, never start the line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2608
/* Like s_req: .qn must follow the alias name, never start the line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2614
2615 /* The .unreq directive deletes an alias which was previously defined
2616 by .req. For example:
2617
2618 my_alias .req r11
2619 .unreq my_alias */
2620
2621 static void
2622 s_unreq (int a ATTRIBUTE_UNUSED)
2623 {
2624 char * name;
2625 char saved_char;
2626
2627 name = input_line_pointer;
2628
2629 while (*input_line_pointer != 0
2630 && *input_line_pointer != ' '
2631 && *input_line_pointer != '\n')
2632 ++input_line_pointer;
2633
2634 saved_char = *input_line_pointer;
2635 *input_line_pointer = 0;
2636
2637 if (!*name)
2638 as_bad (_("invalid syntax for .unreq directive"));
2639 else
2640 {
2641 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2642 name);
2643
2644 if (!reg)
2645 as_bad (_("unknown register alias '%s'"), name);
2646 else if (reg->builtin)
2647 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2648 name);
2649 else
2650 {
2651 char * p;
2652 char * nbuf;
2653
2654 hash_delete (arm_reg_hsh, name, FALSE);
2655 free ((char *) reg->name);
2656 if (reg->neon)
2657 free (reg->neon);
2658 free (reg);
2659
2660 /* Also locate the all upper case and all lower case versions.
2661 Do not complain if we cannot find one or the other as it
2662 was probably deleted above. */
2663
2664 nbuf = strdup (name);
2665 for (p = nbuf; *p; p++)
2666 *p = TOUPPER (*p);
2667 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2668 if (reg)
2669 {
2670 hash_delete (arm_reg_hsh, nbuf, FALSE);
2671 free ((char *) reg->name);
2672 if (reg->neon)
2673 free (reg->neon);
2674 free (reg);
2675 }
2676
2677 for (p = nbuf; *p; p++)
2678 *p = TOLOWER (*p);
2679 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2680 if (reg)
2681 {
2682 hash_delete (arm_reg_hsh, nbuf, FALSE);
2683 free ((char *) reg->name);
2684 if (reg->neon)
2685 free (reg->neon);
2686 free (reg);
2687 }
2688
2689 free (nbuf);
2690 }
2691 }
2692
2693 *input_line_pointer = saved_char;
2694 demand_empty_rest_of_line ();
2695 }
2696
2697 /* Directives: Instruction set selection. */
2698
2699 #ifdef OBJ_ELF
2700 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2701 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2703 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2704
2705 /* Create a new mapping symbol for the transition to STATE. */
2706
2707 static void
2708 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2709 {
2710 symbolS * symbolP;
2711 const char * symname;
2712 int type;
2713
2714 switch (state)
2715 {
2716 case MAP_DATA:
2717 symname = "$d";
2718 type = BSF_NO_FLAGS;
2719 break;
2720 case MAP_ARM:
2721 symname = "$a";
2722 type = BSF_NO_FLAGS;
2723 break;
2724 case MAP_THUMB:
2725 symname = "$t";
2726 type = BSF_NO_FLAGS;
2727 break;
2728 default:
2729 abort ();
2730 }
2731
2732 symbolP = symbol_new (symname, now_seg, value, frag);
2733 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2734
2735 switch (state)
2736 {
2737 case MAP_ARM:
2738 THUMB_SET_FUNC (symbolP, 0);
2739 ARM_SET_THUMB (symbolP, 0);
2740 ARM_SET_INTERWORK (symbolP, support_interwork);
2741 break;
2742
2743 case MAP_THUMB:
2744 THUMB_SET_FUNC (symbolP, 1);
2745 ARM_SET_THUMB (symbolP, 1);
2746 ARM_SET_INTERWORK (symbolP, support_interwork);
2747 break;
2748
2749 case MAP_DATA:
2750 default:
2751 break;
2752 }
2753
2754 /* Save the mapping symbols for future reference. Also check that
2755 we do not place two mapping symbols at the same offset within a
2756 frag. We'll handle overlap between frags in
2757 check_mapping_symbols.
2758
2759 If .fill or other data filling directive generates zero sized data,
2760 the mapping symbol for the following code will have the same value
2761 as the one generated for the data filling directive. In this case,
2762 we replace the old symbol with the new one at the same address. */
2763 if (value == 0)
2764 {
2765 if (frag->tc_frag_data.first_map != NULL)
2766 {
2767 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2768 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2769 }
2770 frag->tc_frag_data.first_map = symbolP;
2771 }
2772 if (frag->tc_frag_data.last_map != NULL)
2773 {
2774 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2775 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2776 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2777 }
2778 frag->tc_frag_data.last_map = symbolP;
2779 }
2780
2781 /* We must sometimes convert a region marked as code to data during
2782 code alignment, if an odd number of bytes have to be padded. The
2783 code mapping symbol is pushed to an aligned address. */
2784
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the first in the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the padding, then the code mapping symbol after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2807
2808 static void mapping_state_2 (enum mstate state, int max_chars);
2809
2810 /* Set the mapping state to STATE. Only call this when about to
2811 emit some STATE bytes to the file. */
2812
2813 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2846
2847 /* Same as mapping_state, but MAX_CHARS bytes have already been
2848 allocated. Put the mapping symbol that far back. */
2849
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Something was emitted before the first instruction: mark the
	 start of the section as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2876 #undef TRANSITION
2877 #else
2878 #define mapping_state(x) ((void)0)
2879 #define mapping_state_2(x, y) ((void)0)
2880 #endif
2881
2882 /* Find the real, Thumb encoded start of a Thumb function. */
2883
2884 #ifdef OBJ_COFF
2885 static symbolS *
2886 find_real_start (symbolS * symbolP)
2887 {
2888 char * real_start;
2889 const char * name = S_GET_NAME (symbolP);
2890 symbolS * new_target;
2891
2892 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2893 #define STUB_NAME ".real_start_of"
2894
2895 if (name == NULL)
2896 abort ();
2897
2898 /* The compiler may generate BL instructions to local labels because
2899 it needs to perform a branch to a far away location. These labels
2900 do not have a corresponding ".real_start_of" label. We check
2901 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2902 the ".real_start_of" convention for nonlocal branches. */
2903 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2904 return symbolP;
2905
2906 real_start = concat (STUB_NAME, name, NULL);
2907 new_target = symbol_find (real_start);
2908 free (real_start);
2909
2910 if (new_target == NULL)
2911 {
2912 as_warn (_("Failed to find real start of function: %s\n"), name);
2913 new_target = symbolP;
2914 }
2915
2916 return new_target;
2917 }
2918 #endif
2919
2920 static void
2921 opcode_select (int width)
2922 {
2923 switch (width)
2924 {
2925 case 16:
2926 if (! thumb_mode)
2927 {
2928 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2929 as_bad (_("selected processor does not support THUMB opcodes"));
2930
2931 thumb_mode = 1;
2932 /* No need to force the alignment, since we will have been
2933 coming from ARM mode, which is word-aligned. */
2934 record_alignment (now_seg, 1);
2935 }
2936 break;
2937
2938 case 32:
2939 if (thumb_mode)
2940 {
2941 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2942 as_bad (_("selected processor does not support ARM opcodes"));
2943
2944 thumb_mode = 0;
2945
2946 if (!need_pass_2)
2947 frag_align (2, 0, 0);
2948
2949 record_alignment (now_seg, 1);
2950 }
2951 break;
2952
2953 default:
2954 as_bad (_("invalid instruction size selected (%d)"), width);
2955 }
2956 }
2957
/* Handler for the .arm directive: select 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2964
/* Handler for the .thumb directive: select 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2971
2972 static void
2973 s_code (int unused ATTRIBUTE_UNUSED)
2974 {
2975 int temp;
2976
2977 temp = get_absolute_expression ();
2978 switch (temp)
2979 {
2980 case 16:
2981 case 32:
2982 opcode_select (temp);
2983 break;
2984
2985 default:
2986 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2987 }
2988 }
2989
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 marks "forced" Thumb mode, distinct from the ordinary 1.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3006
/* Handler for .thumb_func: switch to Thumb mode and flag the next
   label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3016
3017 /* Perform a .set directive, but also mark the alias as
3018 being a thumb function. */
3019
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate NAME so it can be printed.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }	/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* When EQUIV is set (.thumb_set acting like "="), redefinition of an
     already-defined symbol is an error, as it is for s_set.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3105
3106 /* Directives: Mode selection. */
3107
3108 /* .syntax [unified|divided] - choose the new unified syntax
3109 (same for Arm and Thumb encoding, modulo slight differences in what
3110 can be represented) or the old divergent syntax for each mode. */
3111 static void
3112 s_syntax (int unused ATTRIBUTE_UNUSED)
3113 {
3114 char *name, delim;
3115
3116 delim = get_symbol_name (& name);
3117
3118 if (!strcasecmp (name, "unified"))
3119 unified_syntax = TRUE;
3120 else if (!strcasecmp (name, "divided"))
3121 unified_syntax = FALSE;
3122 else
3123 {
3124 as_bad (_("unrecognized syntax mode \"%s\""), name);
3125 return;
3126 }
3127 (void) restore_line_pointer (delim);
3128 demand_empty_rest_of_line ();
3129 }
3130
3131 /* Directives: sectioning and alignment. */
3132
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3145
/* Handler for the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3157
3158 /* Directives: CodeComposer Studio. */
3159
3160 /* .ref (for CodeComposer Studio syntax only). */
3161 static void
3162 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3163 {
3164 if (codecomposer_syntax)
3165 ignore_rest_of_line ();
3166 else
3167 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3168 }
3169
3170 /* If name is not NULL, then it is used for marking the beginning of a
3171 function, whereas if it is NULL then it means the function end. */
3172 static void
3173 asmfunc_debug (const char * name)
3174 {
3175 static const char * last_name = NULL;
3176
3177 if (name != NULL)
3178 {
3179 gas_assert (last_name == NULL);
3180 last_name = name;
3181
3182 if (debug_type == DEBUG_STABS)
3183 stabs_generate_asm_func (name, name);
3184 }
3185 else
3186 {
3187 gas_assert (last_name != NULL);
3188
3189 if (debug_type == DEBUG_STABS)
3190 stabs_generate_asm_endfunc (last_name, last_name);
3191
3192 last_name = NULL;
3193 }
3194 }
3195
/* Handler for .asmfunc (CodeComposer Studio): advance the asmfunc
   state machine, diagnosing misuse.  */

static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  /* Valid: now wait for the label naming the function.  */
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3220
/* Handler for .endasmfunc (CodeComposer Studio): close the current
   asmfunc, diagnosing misuse.  */

static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  /* Valid: close the function and emit end-of-function debug info.  */
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3246
3247 static void
3248 s_ccs_def (int name)
3249 {
3250 if (codecomposer_syntax)
3251 s_globl (name);
3252 else
3253 as_bad (_(".def pseudo-op only available with -mccs flag."));
3254 }
3255
3256 /* Directives: Literal pools. */
3257
3258 static literal_pool *
3259 find_literal_pool (void)
3260 {
3261 literal_pool * pool;
3262
3263 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3264 {
3265 if (pool->section == now_seg
3266 && pool->sub_section == now_subseg)
3267 break;
3268 }
3269
3270 return pool;
3271 }
3272
3273 static literal_pool *
3274 find_or_make_literal_pool (void)
3275 {
3276 /* Next literal pool ID number. */
3277 static unsigned int latest_pool_num = 1;
3278 literal_pool * pool;
3279
3280 pool = find_literal_pool ();
3281
3282 if (pool == NULL)
3283 {
3284 /* Create a new pool. */
3285 pool = XNEW (literal_pool);
3286 if (! pool)
3287 return NULL;
3288
3289 pool->next_free_entry = 0;
3290 pool->section = now_seg;
3291 pool->sub_section = now_subseg;
3292 pool->next = list_of_pools;
3293 pool->symbol = NULL;
3294 pool->alignment = 2;
3295
3296 /* Add it to the list. */
3297 list_of_pools = pool;
3298 }
3299
3300 /* New pools, and emptied pools, will have a NULL symbol. */
3301 if (pool->symbol == NULL)
3302 {
3303 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3304 (valueT) 0, &zero_address_frag);
3305 pool->id = latest_pool_num ++;
3306 }
3307
3308 /* Done. */
3309 return pool;
3310 }
3311
/* Add the literal in the global 'inst' structure to the relevant
   literal pool.  NBYTES is the size of the literal: 4 for a word, or 8
   for a double-word (stored as two consecutive 4-byte entries).  On
   success, inst.relocs[0] is rewritten to reference the pool symbol
   plus the entry's byte offset and SUCCESS is returned; FAIL is
   returned on pool overflow or an unsuitable operand.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, ordered to match
	 the target endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* A matching constant entry must agree in value, size (X_md)
	     and signedness.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* A matching symbolic entry must agree in symbol, addend and
	     operator symbol.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 holding its two halves at an 8-byte aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot (inserted for 8-byte alignment, tagged in the
	 high byte of X_md) can be reused for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: emit a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as separate constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse a padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Point the instruction's relocation at the (possibly pre-existing)
     pool entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3476
3477 bfd_boolean
3478 tc_start_label_without_colon (void)
3479 {
3480 bfd_boolean ret = TRUE;
3481
3482 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3483 {
3484 const char *label = input_line_pointer;
3485
3486 while (!is_end_of_line[(int) label[-1]])
3487 --label;
3488
3489 if (*label == '.')
3490 {
3491 as_bad (_("Invalid label '%s'"), label);
3492 ret = FALSE;
3493 }
3494
3495 asmfunc_debug (label);
3496
3497 asmfunc_state = WAITING_ENDASMFUNC;
3498 }
3499
3500 return ret;
3501 }
3502
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value. That's what these functions do.
   Gives SYMBOLP its name (copied), segment, value and fragment, and
   appends it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name, /* It is copied, the caller can modify. */
	       segT segment, /* Segment identifier (SEG_<something>). */
	       valueT valu, /* Symbol value. */
	       fragS * frag) /* Associated fragment. */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name onto the notes obstack so the caller's buffer can be
     reused or freed.  */
  name_length = strlen (name) + 1; /* +1 for \0. */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  Appending is only legal while the
     table is still mutable.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3553
/* Pseudo-op: .ltorg / .pool.  Dump the current section's literal pool
   at this location, attach the pool symbol here, and mark the pool
   as emptied so subsequent literals start a fresh pool.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte in the name keeps it out of the user's namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the pool's (hitherto undefined) symbol at this location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The low byte of X_md holds the entry size.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3605
3606 #ifdef OBJ_ELF
3607 /* Forward declarations for functions below, in the MD interface
3608 section. */
3609 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3610 static valueT create_unwind_entry (int);
3611 static void start_unwind_section (const segT, int);
3612 static void add_unwind_opcode (valueT, int);
3613 static void flush_pending_unwind (void);
3614
3615 /* Directives: Data. */
3616
/* Implements data directives (e.g. .word) for ELF: like cons, but a
   symbolic expression may carry a relocation-specifier suffix (parsed
   by parse_reloc, e.g. "(GOT)").  NBYTES is the size of each emitted
   value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the original input, then splice the "(reloc)"
		     text out of it so the whole expression can be
		     re-parsed in one piece.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original input text.  */
		  memcpy (base, save_buf, p - base);

		  /* Zero-fill the NBYTES field; the fix-up covers the
		     last SIZE bytes of it.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3711
3712 /* Emit an expression containing a 32-bit thumb instruction.
3713 Implementation based on put_thumb32_insn. */
3714
3715 static void
3716 emit_thumb32_expr (expressionS * exp)
3717 {
3718 expressionS exp_high = *exp;
3719
3720 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3721 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3722 exp->X_add_number &= 0xffff;
3723 emit_expr (exp, (unsigned int) THUMB_SIZE);
3724 }
3725
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit Thumb encoding, 4 for a 32-bit one, and 0 when the size
   cannot be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;

  if (value >= 0xe8000000u)
    return 4;

  return 0;
}
3738
/* Emit one instruction given as a constant expression EXP of NBYTES
   (0 means deduce the size from the opcode value, Thumb only).
   Returns TRUE if something was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* .inst with no width suffix: infer 16 vs 32 bits from the
	 opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent: a manually
		 emitted instruction still consumes an IT slot.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are emitted as two
		 half-words, high half first, on little-endian
		 targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3783
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  Implements .inst,
   .inst.n and .inst.w; NBYTES is 0 (deduce per-opcode), 2 or 4.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* Width suffixes (.n/.w) only make sense in Thumb mode; ARM
	 instructions are always 4 bytes.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3833
/* Parse a .rel31 directive: ".rel31 <0|1>, <expression>".  Emits a
   32-bit word whose top bit is the given flag; the low 31 bits are
   filled by a pc-relative BFD_RELOC_ARM_PREL31 fix-up against the
   expression.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Pre-write the high bit; the PREL31 fix-up supplies the rest.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3873
3874 /* Directives: AEABI stack-unwind tables. */
3875
3876 /* Parse an unwind_fnstart directive. Simply records the current location. */
3877
3878 static void
3879 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3880 {
3881 demand_empty_rest_of_line ();
3882 if (unwind.proc_start)
3883 {
3884 as_bad (_("duplicate .fnstart directive"));
3885 return;
3886 }
3887
3888 /* Mark the start of the function. */
3889 unwind.proc_start = expr_build_dot ();
3890
3891 /* Reset the rest of the unwind info. */
3892 unwind.opcode_count = 0;
3893 unwind.table_entry = NULL;
3894 unwind.personality_routine = NULL;
3895 unwind.personality_index = -1;
3896 unwind.frame_size = 0;
3897 unwind.fp_offset = 0;
3898 unwind.fp_reg = REG_SP;
3899 unwind.fp_used = 0;
3900 unwind.sp_restored = 0;
3901 }
3902
3903
3904 /* Parse a handlerdata directive. Creates the exception handling table entry
3905 for the function. */
3906
3907 static void
3908 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3909 {
3910 demand_empty_rest_of_line ();
3911 if (!unwind.proc_start)
3912 as_bad (MISSING_FNSTART);
3913
3914 if (unwind.table_entry)
3915 as_bad (_("duplicate .handlerdata directive"));
3916
3917 create_unwind_entry (1);
3918 }
3919
/* Parse an unwind_fnend directive.  Generates the index table entry
   (two words in the exception index section): a PREL31 reference to
   the function start, plus either an inline unwind word or a PREL31
   reference to the table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when the whole entry can be
     expressed inline in the index table.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix-up records the reference without emitting
	 data.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3989
3990
3991 /* Parse an unwind_cantunwind directive. */
3992
3993 static void
3994 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3995 {
3996 demand_empty_rest_of_line ();
3997 if (!unwind.proc_start)
3998 as_bad (MISSING_FNSTART);
3999
4000 if (unwind.personality_routine || unwind.personality_index != -1)
4001 as_bad (_("personality routine specified for cantunwind frame"));
4002
4003 unwind.personality_index = -2;
4004 }
4005
4006
4007 /* Parse a personalityindex directive. */
4008
4009 static void
4010 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4011 {
4012 expressionS exp;
4013
4014 if (!unwind.proc_start)
4015 as_bad (MISSING_FNSTART);
4016
4017 if (unwind.personality_routine || unwind.personality_index != -1)
4018 as_bad (_("duplicate .personalityindex directive"));
4019
4020 expression (&exp);
4021
4022 if (exp.X_op != O_constant
4023 || exp.X_add_number < 0 || exp.X_add_number > 15)
4024 {
4025 as_bad (_("bad personality routine number"));
4026 ignore_rest_of_line ();
4027 return;
4028 }
4029
4030 unwind.personality_index = exp.X_add_number;
4031
4032 demand_empty_rest_of_line ();
4033 }
4034
4035
4036 /* Parse a personality directive. */
4037
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the routine name; get_symbol_name NUL-terminates it in place
     and returns the character it overwrote.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the character clobbered by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4057
4058
/* Parse a directive saving core registers.  Reads a core register
   list, emits the most compact pop opcodes available and accounts for
   the stack space used.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask, bit N set when rN is in the list.  */
  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the previously emitted movsp opcode and substitute sp
	 (bit 13) for ip (bit 12) in the list.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4134
4135
4136 /* Parse a directive saving FPA registers. */
4137
4138 static void
4139 s_arm_unwind_save_fpa (int reg)
4140 {
4141 expressionS exp;
4142 int num_regs;
4143 valueT op;
4144
4145 /* Get Number of registers to transfer. */
4146 if (skip_past_comma (&input_line_pointer) != FAIL)
4147 expression (&exp);
4148 else
4149 exp.X_op = O_illegal;
4150
4151 if (exp.X_op != O_constant)
4152 {
4153 as_bad (_("expected , <constant>"));
4154 ignore_rest_of_line ();
4155 return;
4156 }
4157
4158 num_regs = exp.X_add_number;
4159
4160 if (num_regs < 1 || num_regs > 4)
4161 {
4162 as_bad (_("number of registers must be in the range [1:4]"));
4163 ignore_rest_of_line ();
4164 return;
4165 }
4166
4167 demand_empty_rest_of_line ();
4168
4169 if (reg == 4)
4170 {
4171 /* Short form. */
4172 op = 0xb4 | (num_regs - 1);
4173 add_unwind_opcode (op, 1);
4174 }
4175 else
4176 {
4177 /* Long form. */
4178 op = 0xc800 | (reg << 4) | (num_regs - 1);
4179 add_unwind_opcode (op, 2);
4180 }
4181 unwind.frame_size += num_regs * 12;
4182 }
4183
4184
/* Parse a directive saving VFP registers for ARMv6 and above.  Splits
   the list into a d16-d31 block (opcode 0xc8) and a d0-d15 block
   (opcode 0xc9) as needed.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode 0xc8 encodes the first register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4235
4236
4237 /* Parse a directive saving VFP registers for pre-ARMv6. */
4238
4239 static void
4240 s_arm_unwind_save_vfp (void)
4241 {
4242 int count;
4243 unsigned int reg;
4244 valueT op;
4245 bfd_boolean partial_match;
4246
4247 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4248 &partial_match);
4249 if (count == FAIL)
4250 {
4251 as_bad (_("expected register list"));
4252 ignore_rest_of_line ();
4253 return;
4254 }
4255
4256 demand_empty_rest_of_line ();
4257
4258 if (reg == 8)
4259 {
4260 /* Short form. */
4261 op = 0xb8 | (count - 1);
4262 add_unwind_opcode (op, 1);
4263 }
4264 else
4265 {
4266 /* Long form. */
4267 op = 0xb300 | (reg << 4) | (count - 1);
4268 add_unwind_opcode (op, 2);
4269 }
4270 unwind.frame_size += count * 8 + 4;
4271 }
4272
4273
4274 /* Parse a directive saving iWMMXt data registers. */
4275
4276 static void
4277 s_arm_unwind_save_mmxwr (void)
4278 {
4279 int reg;
4280 int hi_reg;
4281 int i;
4282 unsigned mask = 0;
4283 valueT op;
4284
4285 if (*input_line_pointer == '{')
4286 input_line_pointer++;
4287
4288 do
4289 {
4290 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4291
4292 if (reg == FAIL)
4293 {
4294 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4295 goto error;
4296 }
4297
4298 if (mask >> reg)
4299 as_tsktsk (_("register list not in ascending order"));
4300 mask |= 1 << reg;
4301
4302 if (*input_line_pointer == '-')
4303 {
4304 input_line_pointer++;
4305 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4306 if (hi_reg == FAIL)
4307 {
4308 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4309 goto error;
4310 }
4311 else if (reg >= hi_reg)
4312 {
4313 as_bad (_("bad register range"));
4314 goto error;
4315 }
4316 for (; reg < hi_reg; reg++)
4317 mask |= 1 << reg;
4318 }
4319 }
4320 while (skip_past_comma (&input_line_pointer) != FAIL);
4321
4322 skip_past_char (&input_line_pointer, '}');
4323
4324 demand_empty_rest_of_line ();
4325
4326 /* Generate any deferred opcodes because we're going to be looking at
4327 the list. */
4328 flush_pending_unwind ();
4329
4330 for (i = 0; i < 16; i++)
4331 {
4332 if (mask & (1 << i))
4333 unwind.frame_size += 8;
4334 }
4335
4336 /* Attempt to combine with a previous opcode. We do this because gcc
4337 likes to output separate unwind directives for a single block of
4338 registers. */
4339 if (unwind.opcode_count > 0)
4340 {
4341 i = unwind.opcodes[unwind.opcode_count - 1];
4342 if ((i & 0xf8) == 0xc0)
4343 {
4344 i &= 7;
4345 /* Only merge if the blocks are contiguous. */
4346 if (i < 6)
4347 {
4348 if ((mask & 0xfe00) == (1 << 9))
4349 {
4350 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4351 unwind.opcode_count--;
4352 }
4353 }
4354 else if (i == 6 && unwind.opcode_count >= 2)
4355 {
4356 i = unwind.opcodes[unwind.opcode_count - 2];
4357 reg = i >> 4;
4358 i &= 0xf;
4359
4360 op = 0xffff << (reg - 1);
4361 if (reg > 0
4362 && ((mask & op) == (1u << (reg - 1))))
4363 {
4364 op = (1 << (reg + i + 1)) - 1;
4365 op &= ~((1 << reg) - 1);
4366 mask |= op;
4367 unwind.opcode_count -= 2;
4368 }
4369 }
4370 }
4371 }
4372
4373 hi_reg = 15;
4374 /* We want to generate opcodes in the order the registers have been
4375 saved, ie. descending order. */
4376 for (reg = 15; reg >= -1; reg--)
4377 {
4378 /* Save registers in blocks. */
4379 if (reg < 0
4380 || !(mask & (1 << reg)))
4381 {
4382 /* We found an unsaved reg. Generate opcodes to save the
4383 preceding block. */
4384 if (reg != hi_reg)
4385 {
4386 if (reg == 9)
4387 {
4388 /* Short form. */
4389 op = 0xc0 | (hi_reg - 10);
4390 add_unwind_opcode (op, 1);
4391 }
4392 else
4393 {
4394 /* Long form. */
4395 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4396 add_unwind_opcode (op, 2);
4397 }
4398 }
4399 hi_reg = reg - 1;
4400 }
4401 }
4402
4403 return;
4404 error:
4405 ignore_rest_of_line ();
4406 }
4407
/* Parse a directive saving iWMMXt control registers (wcgr).  Builds a
   4-bit mask and emits unwind opcode 0xc7.  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* The parsed wcgr number is rebased by 8 to a 0-based mask bit.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  /* NOTE(review): unlike REG above, HI_REG is not rebased by 8
	     here, so the comparison and fill loop below appear to mix
	     the two numberings for register ranges — confirm against
	     the REG_TYPE_MMXWCG entries in the register table.  */
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved control register occupies one word on the stack.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
 error:
  ignore_rest_of_line ();
}
4475
4476
4477 /* Parse an unwind_save directive.
4478 If the argument is non-zero, this is a .vsave directive. */
4479
4480 static void
4481 s_arm_unwind_save (int arch_v6)
4482 {
4483 char *peek;
4484 struct reg_entry *reg;
4485 bfd_boolean had_brace = FALSE;
4486
4487 if (!unwind.proc_start)
4488 as_bad (MISSING_FNSTART);
4489
4490 /* Figure out what sort of save we have. */
4491 peek = input_line_pointer;
4492
4493 if (*peek == '{')
4494 {
4495 had_brace = TRUE;
4496 peek++;
4497 }
4498
4499 reg = arm_reg_parse_multi (&peek);
4500
4501 if (!reg)
4502 {
4503 as_bad (_("register expected"));
4504 ignore_rest_of_line ();
4505 return;
4506 }
4507
4508 switch (reg->type)
4509 {
4510 case REG_TYPE_FN:
4511 if (had_brace)
4512 {
4513 as_bad (_("FPA .unwind_save does not take a register list"));
4514 ignore_rest_of_line ();
4515 return;
4516 }
4517 input_line_pointer = peek;
4518 s_arm_unwind_save_fpa (reg->number);
4519 return;
4520
4521 case REG_TYPE_RN:
4522 s_arm_unwind_save_core ();
4523 return;
4524
4525 case REG_TYPE_VFD:
4526 if (arch_v6)
4527 s_arm_unwind_save_vfp_armv6 ();
4528 else
4529 s_arm_unwind_save_vfp ();
4530 return;
4531
4532 case REG_TYPE_MMXWR:
4533 s_arm_unwind_save_mmxwr ();
4534 return;
4535
4536 case REG_TYPE_MMXWCG:
4537 s_arm_unwind_save_mmxwcg ();
4538 return;
4539
4540 default:
4541 as_bad (_(".unwind_save does not support this kind of register"));
4542 ignore_rest_of_line ();
4543 }
4544 }
4545
4546
/* Parse an unwind_movsp directive: ".movsp <reg> [, #<offset>]".
   Records that the stack pointer has been restored from <reg> and
   emits the corresponding "set vsp = rN" unwind opcode.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value (0x9N: vsp = rN).  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4596
4597 /* Parse an unwind_pad directive. */
4598
4599 static void
4600 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4601 {
4602 int offset;
4603
4604 if (!unwind.proc_start)
4605 as_bad (MISSING_FNSTART);
4606
4607 if (immediate_for_directive (&offset) == FAIL)
4608 return;
4609
4610 if (offset & 3)
4611 {
4612 as_bad (_("stack increment must be multiple of 4"));
4613 ignore_rest_of_line ();
4614 return;
4615 }
4616
4617 /* Don't generate any opcodes, just record the details for later. */
4618 unwind.frame_size += offset;
4619 unwind.pending_offset += offset;
4620
4621 demand_empty_rest_of_line ();
4622 }
4623
4624 /* Parse an unwind_setfp directive. */
4625
4626 static void
4627 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4628 {
4629 int sp_reg;
4630 int fp_reg;
4631 int offset;
4632
4633 if (!unwind.proc_start)
4634 as_bad (MISSING_FNSTART);
4635
4636 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4637 if (skip_past_comma (&input_line_pointer) == FAIL)
4638 sp_reg = FAIL;
4639 else
4640 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4641
4642 if (fp_reg == FAIL || sp_reg == FAIL)
4643 {
4644 as_bad (_("expected <reg>, <reg>"));
4645 ignore_rest_of_line ();
4646 return;
4647 }
4648
4649 /* Optional constant. */
4650 if (skip_past_comma (&input_line_pointer) != FAIL)
4651 {
4652 if (immediate_for_directive (&offset) == FAIL)
4653 return;
4654 }
4655 else
4656 offset = 0;
4657
4658 demand_empty_rest_of_line ();
4659
4660 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4661 {
4662 as_bad (_("register must be either sp or set by a previous"
4663 "unwind_movsp directive"));
4664 return;
4665 }
4666
4667 /* Don't generate any opcodes, just record the information for later. */
4668 unwind.fp_reg = fp_reg;
4669 unwind.fp_used = 1;
4670 if (sp_reg == REG_SP)
4671 unwind.fp_offset = unwind.frame_size - offset;
4672 else
4673 unwind.fp_offset -= offset;
4674 }
4675
4676 /* Parse an unwind_raw directive. */
4677
4678 static void
4679 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4680 {
4681 expressionS exp;
4682 /* This is an arbitrary limit. */
4683 unsigned char op[16];
4684 int count;
4685
4686 if (!unwind.proc_start)
4687 as_bad (MISSING_FNSTART);
4688
4689 expression (&exp);
4690 if (exp.X_op == O_constant
4691 && skip_past_comma (&input_line_pointer) != FAIL)
4692 {
4693 unwind.frame_size += exp.X_add_number;
4694 expression (&exp);
4695 }
4696 else
4697 exp.X_op = O_illegal;
4698
4699 if (exp.X_op != O_constant)
4700 {
4701 as_bad (_("expected <offset>, <opcode>"));
4702 ignore_rest_of_line ();
4703 return;
4704 }
4705
4706 count = 0;
4707
4708 /* Parse the opcode. */
4709 for (;;)
4710 {
4711 if (count >= 16)
4712 {
4713 as_bad (_("unwind opcode too long"));
4714 ignore_rest_of_line ();
4715 }
4716 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4717 {
4718 as_bad (_("invalid unwind opcode"));
4719 ignore_rest_of_line ();
4720 return;
4721 }
4722 op[count++] = exp.X_add_number;
4723
4724 /* Parse the next byte. */
4725 if (skip_past_comma (&input_line_pointer) == FAIL)
4726 break;
4727
4728 expression (&exp);
4729 }
4730
4731 /* Add the opcode bytes in reverse order. */
4732 while (count--)
4733 add_unwind_opcode (op[count], 1);
4734
4735 demand_empty_rest_of_line ();
4736 }
4737
4738
4739 /* Parse a .eabi_attribute directive. */
4740
4741 static void
4742 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4743 {
4744 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4745
4746 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4747 attributes_set_explicitly[tag] = 1;
4748 }
4749
4750 /* Emit a tls fix for the symbol. */
4751
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Attach the fixup at the current output position within the frag;
     no bytes are emitted by this directive itself.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4773 #endif /* OBJ_ELF */
4774
4775 static void s_arm_arch (int);
4776 static void s_arm_object_arch (int);
4777 static void s_arm_cpu (int);
4778 static void s_arm_fpu (int);
4779 static void s_arm_arch_extension (int);
4780
4781 #ifdef TE_PE
4782
4783 static void
4784 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4785 {
4786 expressionS exp;
4787
4788 do
4789 {
4790 expression (&exp);
4791 if (exp.X_op == O_symbol)
4792 exp.X_op = O_secrel;
4793
4794 emit_expr (&exp, 4);
4795 }
4796 while (*input_line_pointer++ == ',');
4797
4798 input_line_pointer--;
4799 demand_empty_rest_of_line ();
4800 }
4801 #endif /* TE_PE */
4802
4803 /* This table describes all the machine specific pseudo-ops the assembler
4804 has to support. The fields are:
4805 pseudo-op name without dot
4806 function to call to execute this pseudo-op
4807 Integer arg to pass to the function. */
4808
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only pseudo-ops, including the exception-unwinding directives
     handled by the s_arm_unwind_* functions above.  */
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  /* ".save" and ".vsave" share a handler; the argument selects which.  */
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4882 \f
4883 /* Parser functions used exclusively in instruction operands. */
4884
4885 /* Generic immediate-value read function for use in insn parsing.
4886 STR points to the beginning of the immediate (the leading #);
4887 VAL receives the value; if the value is outside [MIN, MAX]
4888 issue an error. PREFIX_OPT is true if the immediate prefix is
4889 optional. */
4890
4891 static int
4892 parse_immediate (char **str, int *val, int min, int max,
4893 bfd_boolean prefix_opt)
4894 {
4895 expressionS exp;
4896
4897 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4898 if (exp.X_op != O_constant)
4899 {
4900 inst.error = _("constant expression required");
4901 return FAIL;
4902 }
4903
4904 if (exp.X_add_number < min || exp.X_add_number > max)
4905 {
4906 inst.error = _("immediate value out of range");
4907 return FAIL;
4908 }
4909
4910 *val = exp.X_add_number;
4911 return SUCCESS;
4912 }
4913
4914 /* Less-generic immediate-value read function with the possibility of loading a
4915 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4916 instructions. Puts the result directly in inst.operands[i]. */
4917
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* When the caller supplies IN_EXP the parsed expression is written
     there; otherwise a local scratch expression is used.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go into .imm; a high half, if any, goes into .reg.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 one littlenum at a time.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4986
4987 /* Returns the pseudo-register number of an FPA immediate constant,
4988 or FAIL if there isn't a valid constant here. */
4989
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not at end of line - restore and fall through to the
	     slower matching below.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant;
	 a full match is encoded as pseudo-register I + 8.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5079
5080 /* Returns 1 if a number has "quarter-precision" float format
5081 0baBbbbbbc defgh000 00000000 00000000. */
5082
static int
is_quarter_float (unsigned imm)
{
  /* Expected value of bits 25-30, which depends on bit 29.  */
  unsigned expected = (imm & 0x20000000) != 0 ? 0x3e000000 : 0x40000000;

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5089
5090
5091 /* Detect the presence of a floating point or integer zero constant,
5092 i.e. #0.0 or #0. */
5093
5094 static bfd_boolean
5095 parse_ifimm_zero (char **in)
5096 {
5097 int error_code;
5098
5099 if (!is_immediate_prefix (**in))
5100 {
5101 /* In unified syntax, all prefixes are optional. */
5102 if (!unified_syntax)
5103 return FALSE;
5104 }
5105 else
5106 ++*in;
5107
5108 /* Accept #0x0 as a synonym for #0. */
5109 if (strncmp (*in, "0x", 2) == 0)
5110 {
5111 int val;
5112 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
5113 return FALSE;
5114 return TRUE;
5115 }
5116
5117 error_code = atof_generic (in, ".", EXP_CHARS,
5118 &generic_floating_point_number);
5119
5120 if (!error_code
5121 && generic_floating_point_number.sign == '+'
5122 && (generic_floating_point_number.low
5123 > generic_floating_point_number.leader))
5124 return TRUE;
5125
5126 return FALSE;
5127 }
5128
5129 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5130 0baBbbbbbc defgh000 00000000 00000000.
5131 The zero and minus-zero cases need special handling, since they can't be
5132 encoded in the "quarter-precision" float format, but can nonetheless be
5133 loaded as integer constants. */
5134
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan the token for a '.' or an exponent marker; without one the
	 input is an integer and is rejected here.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Zero and minus-zero pass here even though they are not valid
	 quarter-precision encodings (see the comment above).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5192
5193 /* Shift operands. */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic to its kind.  Entries are looked up through the
   arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5214
5215 /* Parse a <shift> specifier on an ARM data processing instruction.
5216 This has three forms:
5217
5218 (LSL|LSR|ASL|ASR|ROR) Rs
5219 (LSL|LSR|ASL|ASR|ROR) #imm
5220 RRX
5221
5222 Note that ASL is assimilated to LSL in the instruction encoding, and
5223 RRX to ROR #0 (which cannot be written as such). */
5224
5225 static int
5226 parse_shift (char **str, int i, enum parse_shift_mode mode)
5227 {
5228 const struct asm_shift_name *shift_name;
5229 enum shift_kind shift;
5230 char *s = *str;
5231 char *p = s;
5232 int reg;
5233
5234 for (p = *str; ISALPHA (*p); p++)
5235 ;
5236
5237 if (p == *str)
5238 {
5239 inst.error = _("shift expression expected");
5240 return FAIL;
5241 }
5242
5243 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5244 p - *str);
5245
5246 if (shift_name == NULL)
5247 {
5248 inst.error = _("shift expression expected");
5249 return FAIL;
5250 }
5251
5252 shift = shift_name->kind;
5253
5254 switch (mode)
5255 {
5256 case NO_SHIFT_RESTRICT:
5257 case SHIFT_IMMEDIATE: break;
5258
5259 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5260 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5261 {
5262 inst.error = _("'LSL' or 'ASR' required");
5263 return FAIL;
5264 }
5265 break;
5266
5267 case SHIFT_LSL_IMMEDIATE:
5268 if (shift != SHIFT_LSL)
5269 {
5270 inst.error = _("'LSL' required");
5271 return FAIL;
5272 }
5273 break;
5274
5275 case SHIFT_ASR_IMMEDIATE:
5276 if (shift != SHIFT_ASR)
5277 {
5278 inst.error = _("'ASR' required");
5279 return FAIL;
5280 }
5281 break;
5282
5283 default: abort ();
5284 }
5285
5286 if (shift != SHIFT_RRX)
5287 {
5288 /* Whitespace can appear here if the next thing is a bare digit. */
5289 skip_whitespace (p);
5290
5291 if (mode == NO_SHIFT_RESTRICT
5292 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5293 {
5294 inst.operands[i].imm = reg;
5295 inst.operands[i].immisreg = 1;
5296 }
5297 else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5298 return FAIL;
5299 }
5300 inst.operands[i].shift_kind = shift;
5301 inst.operands[i].shifted = 1;
5302 *str = p;
5303 return SUCCESS;
5304 }
5305
5306 /* Parse a <shifter_operand> for an ARM data processing instruction:
5307
5308 #<immediate>
5309 #<immediate>, <rotate>
5310 <Rm>
5311 <Rm>, <shift>
5312
5313 where <shift> is defined by parse_shift above, and <rotate> is a
5314 multiple of 2 between 0 and 30. Validation of immediate operands
5315 is deferred to md_apply_fix. */
5316
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> with an optional <shift>.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30] and the base
	 constant must fit in eight bits.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: validation is deferred to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5376
5377 /* Group relocation information. Each entry in the table contains the
5378 textual name of the relocation as may appear in assembler source
5379 and must end with a colon.
5380 Along with this textual name are the relocation codes to be used if
5381 the corresponding instruction is an ALU instruction (ADD or SUB only),
5382 an LDR, an LDRS, or an LDC. */
5383
struct group_reloc_table_entry
{
  /* Relocation name as written in source (without the trailing colon).  */
  const char *name;
  /* BFD relocation codes per instruction class; see the comment above.  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5401
static struct group_reloc_table_entry group_reloc_table[] =
{ /* Program counter relative: */
  /* A zero code means the relocation is not available for that
     instruction class; callers reject it at parse time.  */
  { "pc_g0_nc",
    BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g0",
    BFD_RELOC_ARM_ALU_PC_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
  { "pc_g1_nc",
    BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g1",
    BFD_RELOC_ARM_ALU_PC_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
  { "pc_g2",
    BFD_RELOC_ARM_ALU_PC_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
  /* Section base relative */
  { "sb_g0_nc",
    BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g0",
    BFD_RELOC_ARM_ALU_SB_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
  { "sb_g1_nc",
    BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g1",
    BFD_RELOC_ARM_ALU_SB_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
  { "sb_g2",
    BFD_RELOC_ARM_ALU_SB_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
  /* Absolute thumb alu relocations.  */
  { "lower0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "lower8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 } };			/* LDC.  */
5476
5477 /* Given the address of a pointer pointing to the textual name of a group
5478 relocation as may appear in assembler source, attempt to find its details
5479 in group_reloc_table. The pointer will be updated to the character after
5480 the trailing colon. On failure, FAIL will be returned; SUCCESS
5481 otherwise. On success, *entry will be updated to point at the relevant
5482 group_reloc_table entry. */
5483
5484 static int
5485 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5486 {
5487 unsigned int i;
5488 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5489 {
5490 int length = strlen (group_reloc_table[i].name);
5491
5492 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5493 && (*str)[length] == ':')
5494 {
5495 *out = &group_reloc_table[i];
5496 *str += (length + 1);
5497 return SUCCESS;
5498 }
5499 }
5500
5501 return FAIL;
5502 }
5503
5504 /* Parse a <shifter_operand> for an ARM data processing instruction
5505 (as for parse_shifter_operand) where group relocations are allowed:
5506
5507 #<immediate>
5508 #<immediate>, <rotate>
5509 #:<group_reloc>:<expression>
5510 <Rm>
5511 <Rm>, <shift>
5512
5513 where <group_reloc> is one of the strings defined in group_reloc_table.
5514 The hashes are optional.
5515
5516 Everything else is as for parse_shifter_operand. */
5517
5518 static parse_operand_result
5519 parse_shifter_operand_group_reloc (char **str, int i)
5520 {
5521 /* Determine if we have the sequence of characters #: or just :
5522 coming next. If we do, then we check for a group relocation.
5523 If we don't, punt the whole lot to parse_shifter_operand. */
5524
5525 if (((*str)[0] == '#' && (*str)[1] == ':')
5526 || (*str)[0] == ':')
5527 {
5528 struct group_reloc_table_entry *entry;
5529
5530 if ((*str)[0] == '#')
5531 (*str) += 2;
5532 else
5533 (*str)++;
5534
5535 /* Try to parse a group relocation. Anything else is an error. */
5536 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5537 {
5538 inst.error = _("unknown group relocation");
5539 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5540 }
5541
5542 /* We now have the group relocation table entry corresponding to
5543 the name in the assembler source. Next, we parse the expression. */
5544 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5545 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5546
5547 /* Record the relocation type (always the ALU variant here). */
5548 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5549 gas_assert (inst.relocs[0].type != 0);
5550
5551 return PARSE_OPERAND_SUCCESS;
5552 }
5553 else
5554 return parse_shifter_operand (str, i) == SUCCESS
5555 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5556
5557 /* Never reached. */
5558 }
5559
5560 /* Parse a Neon alignment expression. Information is written to
5561 inst.operands[i]. We assume the initial ':' has been skipped.
5562
5563 align .imm = align << 8, .immisalign=1, .preind=0 */
5564 static parse_operand_result
5565 parse_neon_alignment (char **str, int i)
5566 {
5567 char *p = *str;
5568 expressionS exp;
5569
5570 my_get_expression (&exp, &p, GE_NO_PREFIX);
5571
5572 if (exp.X_op != O_constant)
5573 {
5574 inst.error = _("alignment must be constant");
5575 return PARSE_OPERAND_FAIL;
5576 }
5577
5578 inst.operands[i].imm = exp.X_add_number << 8;
5579 inst.operands[i].immisalign = 1;
5580 /* Alignments are not pre-indexes. */
5581 inst.operands[i].preind = 0;
5582
5583 *str = p;
5584 return PARSE_OPERAND_SUCCESS;
5585 }
5586
5587 /* Parse all forms of an ARM address expression. Information is written
5588 to inst.operands[i] and/or inst.relocs[0].
5589
5590 Preindexed addressing (.preind=1):
5591
5592 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5593 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5594 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5595 .shift_kind=shift .relocs[0].exp=shift_imm
5596
5597 These three may have a trailing ! which causes .writeback to be set also.
5598
5599 Postindexed addressing (.postind=1, .writeback=1):
5600
5601 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5602 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5603 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5604 .shift_kind=shift .relocs[0].exp=shift_imm
5605
5606 Unindexed addressing (.preind=0, .postind=0):
5607
5608 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5609
5610 Other:
5611
5612 [Rn]{!} shorthand for [Rn,#0]{!}
5613 =immediate .isreg=0 .relocs[0].exp=immediate
5614 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5615
5616 It is the caller's responsibility to check for addressing modes not
5617 supported by the instruction, and to set inst.relocs[0].type. */
5618
5619 static parse_operand_result
5620 parse_address_main (char **str, int i, int group_relocations,
5621 group_reloc_type group_type)
5622 {
5623 char *p = *str;
5624 int reg;
5625
5626 if (skip_past_char (&p, '[') == FAIL)
5627 {
5628 if (skip_past_char (&p, '=') == FAIL)
5629 {
5630 /* Bare address - translate to PC-relative offset. */
5631 inst.relocs[0].pc_rel = 1;
5632 inst.operands[i].reg = REG_PC;
5633 inst.operands[i].isreg = 1;
5634 inst.operands[i].preind = 1;
5635
5636 if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
5637 return PARSE_OPERAND_FAIL;
5638 }
5639 else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
5640 /*allow_symbol_p=*/TRUE))
5641 return PARSE_OPERAND_FAIL;
5642
5643 *str = p;
5644 return PARSE_OPERAND_SUCCESS;
5645 }
5646
5647 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5648 skip_whitespace (p);
5649
5650 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5651 {
5652 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5653 return PARSE_OPERAND_FAIL;
5654 }
5655 inst.operands[i].reg = reg;
5656 inst.operands[i].isreg = 1;
5657
5658 if (skip_past_comma (&p) == SUCCESS)
5659 {
5660 inst.operands[i].preind = 1;
5661
5662 if (*p == '+') p++;
5663 else if (*p == '-') p++, inst.operands[i].negative = 1;
5664
5665 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5666 {
5667 inst.operands[i].imm = reg;
5668 inst.operands[i].immisreg = 1;
5669
5670 if (skip_past_comma (&p) == SUCCESS)
5671 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5672 return PARSE_OPERAND_FAIL;
5673 }
5674 else if (skip_past_char (&p, ':') == SUCCESS)
5675 {
5676 /* FIXME: '@' should be used here, but it's filtered out by generic
5677 code before we get to see it here. This may be subject to
5678 change. */
5679 parse_operand_result result = parse_neon_alignment (&p, i);
5680
5681 if (result != PARSE_OPERAND_SUCCESS)
5682 return result;
5683 }
5684 else
5685 {
5686 if (inst.operands[i].negative)
5687 {
5688 inst.operands[i].negative = 0;
5689 p--;
5690 }
5691
5692 if (group_relocations
5693 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5694 {
5695 struct group_reloc_table_entry *entry;
5696
5697 /* Skip over the #: or : sequence. */
5698 if (*p == '#')
5699 p += 2;
5700 else
5701 p++;
5702
5703 /* Try to parse a group relocation. Anything else is an
5704 error. */
5705 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5706 {
5707 inst.error = _("unknown group relocation");
5708 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5709 }
5710
5711 /* We now have the group relocation table entry corresponding to
5712 the name in the assembler source. Next, we parse the
5713 expression. */
5714 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5715 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5716
5717 /* Record the relocation type. */
5718 switch (group_type)
5719 {
5720 case GROUP_LDR:
5721 inst.relocs[0].type
5722 = (bfd_reloc_code_real_type) entry->ldr_code;
5723 break;
5724
5725 case GROUP_LDRS:
5726 inst.relocs[0].type
5727 = (bfd_reloc_code_real_type) entry->ldrs_code;
5728 break;
5729
5730 case GROUP_LDC:
5731 inst.relocs[0].type
5732 = (bfd_reloc_code_real_type) entry->ldc_code;
5733 break;
5734
5735 default:
5736 gas_assert (0);
5737 }
5738
5739 if (inst.relocs[0].type == 0)
5740 {
5741 inst.error = _("this group relocation is not allowed on this instruction");
5742 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5743 }
5744 }
5745 else
5746 {
5747 char *q = p;
5748
5749 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5750 return PARSE_OPERAND_FAIL;
5751 /* If the offset is 0, find out if it's a +0 or -0. */
5752 if (inst.relocs[0].exp.X_op == O_constant
5753 && inst.relocs[0].exp.X_add_number == 0)
5754 {
5755 skip_whitespace (q);
5756 if (*q == '#')
5757 {
5758 q++;
5759 skip_whitespace (q);
5760 }
5761 if (*q == '-')
5762 inst.operands[i].negative = 1;
5763 }
5764 }
5765 }
5766 }
5767 else if (skip_past_char (&p, ':') == SUCCESS)
5768 {
5769 /* FIXME: '@' should be used here, but it's filtered out by generic code
5770 before we get to see it here. This may be subject to change. */
5771 parse_operand_result result = parse_neon_alignment (&p, i);
5772
5773 if (result != PARSE_OPERAND_SUCCESS)
5774 return result;
5775 }
5776
5777 if (skip_past_char (&p, ']') == FAIL)
5778 {
5779 inst.error = _("']' expected");
5780 return PARSE_OPERAND_FAIL;
5781 }
5782
5783 if (skip_past_char (&p, '!') == SUCCESS)
5784 inst.operands[i].writeback = 1;
5785
5786 else if (skip_past_comma (&p) == SUCCESS)
5787 {
5788 if (skip_past_char (&p, '{') == SUCCESS)
5789 {
5790 /* [Rn], {expr} - unindexed, with option */
5791 if (parse_immediate (&p, &inst.operands[i].imm,
5792 0, 255, TRUE) == FAIL)
5793 return PARSE_OPERAND_FAIL;
5794
5795 if (skip_past_char (&p, '}') == FAIL)
5796 {
5797 inst.error = _("'}' expected at end of 'option' field");
5798 return PARSE_OPERAND_FAIL;
5799 }
5800 if (inst.operands[i].preind)
5801 {
5802 inst.error = _("cannot combine index with option");
5803 return PARSE_OPERAND_FAIL;
5804 }
5805 *str = p;
5806 return PARSE_OPERAND_SUCCESS;
5807 }
5808 else
5809 {
5810 inst.operands[i].postind = 1;
5811 inst.operands[i].writeback = 1;
5812
5813 if (inst.operands[i].preind)
5814 {
5815 inst.error = _("cannot combine pre- and post-indexing");
5816 return PARSE_OPERAND_FAIL;
5817 }
5818
5819 if (*p == '+') p++;
5820 else if (*p == '-') p++, inst.operands[i].negative = 1;
5821
5822 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5823 {
5824 /* We might be using the immediate for alignment already. If we
5825 are, OR the register number into the low-order bits. */
5826 if (inst.operands[i].immisalign)
5827 inst.operands[i].imm |= reg;
5828 else
5829 inst.operands[i].imm = reg;
5830 inst.operands[i].immisreg = 1;
5831
5832 if (skip_past_comma (&p) == SUCCESS)
5833 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5834 return PARSE_OPERAND_FAIL;
5835 }
5836 else
5837 {
5838 char *q = p;
5839
5840 if (inst.operands[i].negative)
5841 {
5842 inst.operands[i].negative = 0;
5843 p--;
5844 }
5845 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5846 return PARSE_OPERAND_FAIL;
5847 /* If the offset is 0, find out if it's a +0 or -0. */
5848 if (inst.relocs[0].exp.X_op == O_constant
5849 && inst.relocs[0].exp.X_add_number == 0)
5850 {
5851 skip_whitespace (q);
5852 if (*q == '#')
5853 {
5854 q++;
5855 skip_whitespace (q);
5856 }
5857 if (*q == '-')
5858 inst.operands[i].negative = 1;
5859 }
5860 }
5861 }
5862 }
5863
5864 /* If at this point neither .preind nor .postind is set, we have a
5865 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5866 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5867 {
5868 inst.operands[i].preind = 1;
5869 inst.relocs[0].exp.X_op = O_constant;
5870 inst.relocs[0].exp.X_add_number = 0;
5871 }
5872 *str = p;
5873 return PARSE_OPERAND_SUCCESS;
5874 }
5875
5876 static int
5877 parse_address (char **str, int i)
5878 {
5879 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5880 ? SUCCESS : FAIL;
5881 }
5882
5883 static parse_operand_result
5884 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5885 {
5886 return parse_address_main (str, i, 1, type);
5887 }
5888
5889 /* Parse an operand for a MOVW or MOVT instruction. */
5890 static int
5891 parse_half (char **str)
5892 {
5893 char * p;
5894
5895 p = *str;
5896 skip_past_char (&p, '#');
5897 if (strncasecmp (p, ":lower16:", 9) == 0)
5898 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
5899 else if (strncasecmp (p, ":upper16:", 9) == 0)
5900 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
5901
5902 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
5903 {
5904 p += 9;
5905 skip_whitespace (p);
5906 }
5907
5908 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5909 return FAIL;
5910
5911 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
5912 {
5913 if (inst.relocs[0].exp.X_op != O_constant)
5914 {
5915 inst.error = _("constant expression expected");
5916 return FAIL;
5917 }
5918 if (inst.relocs[0].exp.X_add_number < 0
5919 || inst.relocs[0].exp.X_add_number > 0xffff)
5920 {
5921 inst.error = _("immediate value out of range");
5922 return FAIL;
5923 }
5924 }
5925 *str = p;
5926 return SUCCESS;
5927 }
5928
5929 /* Miscellaneous. */
5930
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR, i.e. the
   PSR is being written (used below when deciding the mask bits).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* TRUE when the selected CPU has the M-profile extension; M-profile
     uses a different special-register namespace (arm_v7m_psr_hsh).  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: scan the whole register name, then look it up in the
	 v7m special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names containing "psr" (iapsr, eapsr, xpsr, psr), cut the
	 scan short just after the 'r' so that any _<bits> suffix is
	 left for the check_suffix handling below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Accumulate the n/z/c/v/q flag bits.  0x20 in nzcvq_bits (and
	     0x2 in g_bit) acts as a "bit given twice" sentinel that is
	     rejected after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q: that is the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  /* The 'g' bit requires the DSP extension.  */
	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Not APSR: look the suffix up in the generic PSR-field table
	     and or the field bits in.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6127
6128 static int
6129 parse_sys_vldr_vstr (char **str)
6130 {
6131 unsigned i;
6132 int val = FAIL;
6133 struct {
6134 const char *name;
6135 int regl;
6136 int regh;
6137 } sysregs[] = {
6138 {"FPSCR", 0x1, 0x0},
6139 {"FPSCR_nzcvqc", 0x2, 0x0},
6140 {"VPR", 0x4, 0x1},
6141 {"P0", 0x5, 0x1},
6142 {"FPCXTNS", 0x6, 0x1},
6143 {"FPCXTS", 0x7, 0x1}
6144 };
6145 char *op_end = strchr (*str, ',');
6146 size_t op_strlen = op_end - *str;
6147
6148 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6149 {
6150 if (!strncmp (*str, sysregs[i].name, op_strlen))
6151 {
6152 val = sysregs[i].regl | (sysregs[i].regh << 3);
6153 *str = op_end;
6154 break;
6155 }
6156 }
6157
6158 return val;
6159 }
6160
6161 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6162 value suitable for splatting into the AIF field of the instruction. */
6163
6164 static int
6165 parse_cps_flags (char **str)
6166 {
6167 int val = 0;
6168 int saw_a_flag = 0;
6169 char *s = *str;
6170
6171 for (;;)
6172 switch (*s++)
6173 {
6174 case '\0': case ',':
6175 goto done;
6176
6177 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6178 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6179 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6180
6181 default:
6182 inst.error = _("unrecognized CPS flag");
6183 return FAIL;
6184 }
6185
6186 done:
6187 if (saw_a_flag == 0)
6188 {
6189 inst.error = _("missing CPS flags");
6190 return FAIL;
6191 }
6192
6193 *str = s - 1;
6194 return val;
6195 }
6196
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */

static int
parse_endian_specifier (char **str)
{
  int little_endian;
  char *s = *str;

  /* NOTE(review): strncasecmp returns 0 on a match, so each condition
     below is true when S does NOT match the quoted string.  As written,
     "BE" falls through to the else-if and yields 1, while "LE" (and any
     other non-"BE" prefix) yields 0 -- the opposite of the header
     comment above -- and the final else is unreachable, since S cannot
     match both "BE" and "LE" at once, so an unknown two-letter
     specifier is silently accepted as 0.  Verify against the consumer
     (presumably do_setend) before "fixing" this: the instruction
     encoding may depend on the values actually produced here.  */
  if (strncasecmp (s, "BE", 2))
    little_endian = 0;
  else if (strncasecmp (s, "LE", 2))
    little_endian = 1;
  else
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  /* Reject trailing identifier characters, e.g. "BEx" or "LE_".  */
  if (ISALNUM (s[2]) || s[2] == '_')
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  *str = s + 2;
  return little_endian;
}
6225
6226 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6227 value suitable for poking into the rotate field of an sxt or sxta
6228 instruction, or FAIL on error. */
6229
6230 static int
6231 parse_ror (char **str)
6232 {
6233 int rot;
6234 char *s = *str;
6235
6236 if (strncasecmp (s, "ROR", 3) == 0)
6237 s += 3;
6238 else
6239 {
6240 inst.error = _("missing rotation field after comma");
6241 return FAIL;
6242 }
6243
6244 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6245 return FAIL;
6246
6247 switch (rot)
6248 {
6249 case 0: *str = s; return 0x0;
6250 case 8: *str = s; return 0x1;
6251 case 16: *str = s; return 0x2;
6252 case 24: *str = s; return 0x3;
6253
6254 default:
6255 inst.error = _("rotation can only be 0, 8, 16, or 24");
6256 return FAIL;
6257 }
6258 }
6259
6260 /* Parse a conditional code (from conds[] below). The value returned is in the
6261 range 0 .. 14, or FAIL. */
6262 static int
6263 parse_cond (char **str)
6264 {
6265 char *q;
6266 const struct asm_cond *c;
6267 int n;
6268 /* Condition codes are always 2 characters, so matching up to
6269 3 characters is sufficient. */
6270 char cond[3];
6271
6272 q = *str;
6273 n = 0;
6274 while (ISALPHA (*q) && n < 3)
6275 {
6276 cond[n] = TOLOWER (*q);
6277 q++;
6278 n++;
6279 }
6280
6281 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6282 if (!c)
6283 {
6284 inst.error = _("condition required");
6285 return FAIL;
6286 }
6287
6288 *str = q;
6289 return c->value;
6290 }
6291
6292 /* Record a use of the given feature. */
6293 static void
6294 record_feature_use (const arm_feature_set *feature)
6295 {
6296 if (thumb_mode)
6297 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6298 else
6299 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6300 }
6301
6302 /* If the given feature is currently allowed, mark it as used and return TRUE.
6303 Return FALSE otherwise. */
6304 static bfd_boolean
6305 mark_feature_used (const arm_feature_set *feature)
6306 {
6307 /* Ensure the option is currently allowed. */
6308 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6309 return FALSE;
6310
6311 /* Add the appropriate architecture feature for the barrier option used. */
6312 record_feature_use (feature);
6313
6314 return TRUE;
6315 }
6316
6317 /* Parse an option for a barrier instruction. Returns the encoding for the
6318 option, or FAIL. */
6319 static int
6320 parse_barrier (char **str)
6321 {
6322 char *p, *q;
6323 const struct asm_barrier_opt *o;
6324
6325 p = q = *str;
6326 while (ISALPHA (*q))
6327 q++;
6328
6329 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6330 q - p);
6331 if (!o)
6332 return FAIL;
6333
6334 if (!mark_feature_used (&o->arch))
6335 return FAIL;
6336
6337 *str = q;
6338 return o->value;
6339 }
6340
6341 /* Parse the operands of a table branch instruction. Similar to a memory
6342 operand. */
6343 static int
6344 parse_tb (char **str)
6345 {
6346 char * p = *str;
6347 int reg;
6348
6349 if (skip_past_char (&p, '[') == FAIL)
6350 {
6351 inst.error = _("'[' expected");
6352 return FAIL;
6353 }
6354
6355 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6356 {
6357 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6358 return FAIL;
6359 }
6360 inst.operands[0].reg = reg;
6361
6362 if (skip_past_comma (&p) == FAIL)
6363 {
6364 inst.error = _("',' expected");
6365 return FAIL;
6366 }
6367
6368 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6369 {
6370 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6371 return FAIL;
6372 }
6373 inst.operands[0].imm = reg;
6374
6375 if (skip_past_comma (&p) == SUCCESS)
6376 {
6377 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6378 return FAIL;
6379 if (inst.relocs[0].exp.X_add_number != 1)
6380 {
6381 inst.error = _("invalid shift");
6382 return FAIL;
6383 }
6384 inst.operands[0].shifted = 1;
6385 }
6386
6387 if (skip_past_char (&p, ']') == FAIL)
6388 {
6389 inst.error = _("']' expected");
6390 return FAIL;
6391 }
6392 *str = p;
6393 return SUCCESS;
6394 }
6395
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[] and is advanced as operands are filled.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The first operand decides which case we are in: a Neon scalar, a
     typed vector register, or a core (ARM) register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6618
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  It packs the ARM matcher code into
   the low 16 bits and the Thumb matcher code into the high 16 bits;
   parse_operands picks the appropriate half at match time.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  Codes at or above OP_FIRST_OPTIONAL
   denote operands that may be omitted, which is why all OP_o* entries
   are grouped after that boundary.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6767
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   PATTERN is a zero-terminated array of OP_* codes.  An entry may pack
   two codes into one word (ARM code in the low 16 bits, Thumb code in
   the high 16 bits); THUMB selects which half applies.  Codes at or
   above OP_FIRST_OPTIONAL denote optional operands, and at most one
   optional operand per pattern may be "open" for backtracking at a
   time.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;	/* Where to resume if an optional operand
				   turns out not to be present.  */
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

/* Consume literal character CHR or bail out with BAD_ARGS.  */
#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

/* Parse a register of REGTYPE into operand I, or fail the match.  */
#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

/* As po_reg_or_fail, but on failure jump to LABEL (used to try an
   alternative interpretation of the same text).  */
#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

/* Parse an immediate constrained to [MIN, MAX] into operand I, or fail.
   POPT says whether a '#' prefix is optional.  */
#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

/* Parse a Neon scalar (Dn[x]) with element size ELSZ, else goto LABEL.  */
#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

/* Fail the match if EXPR (a parser call) reports failure.  */
#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

/* As above, but PARSE_OPERAND_FAIL_NO_BACKTRACK additionally disables
   backtracking over a pending optional operand (used for group
   relocations, whose errors should not be masked).  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

/* Parse a barrier option name, falling back to a numeric immediate when
   the text cannot be a name at all.  */
#define po_barrier_or_imm(str)				   \
  do							   \
    {						 	   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	       && val != 0xf))				   \
	{						   \
	   inst.error = _("invalid barrier type");	   \
	   backtrack_pos = 0;				   \
	   goto failure;				   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Unpack a mixed ARM/Thumb operand code.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* A comma precedes every operand but the first; also skip it
	 after an optional first operand that was actually absent.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	    {
	      inst.error
	        = _("only floating point zero is allowed as immediate value");
	      goto failure;
	    }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  /* A register in brackets, e.g. "[r0]".  */
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	/* The "b" variants allow the '#' prefix to be omitted.  */
	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If parsing consumed everything up to the zapped '!',
	       step past its position.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      /* A symbol may carry a relocation suffix, e.g. "(plt)".  */
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  /* Fallback target used by po_barrier_or_imm above.  */
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  /* Only reached when a banked register name parsed.  */
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	  try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag letter; any repeat or
		 unknown letter forces 16 (invalid), 15 means all four.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.	 */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  /* PC rejected in Thumb mode, or with writeback in ARM mode.  */
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  /* These cases left VAL holding the parse result; fail now if
	     the sub-parser failed, otherwise stash the value.  */
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7519
/* Undefine all of the operand-parsing helper macros defined inside
   parse_operands so they cannot leak into the rest of the file.
   (The old list undefined the never-defined po_scalar_or_fail and
   missed po_scalar_or_goto and the two po_misc_* helpers.)  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
7526
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and RETURN FROM THE
   ENCLOSING FUNCTION -- so this must only be used inside functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7538
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like constraint, this returns from the enclosing (void) function
   when the register is rejected.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
7559
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is gated on the global warn_on_deprecated
   flag and is non-fatal (as_tsktsk).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7567
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate 32-bit value V left by N bits; both shift counts are masked
   to 0-31, so N == 0 is safe.  NOTE: V and N are each evaluated more
   than once -- do not pass expressions with side effects.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7571
7572 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7573
7574 The only binary encoding difference is the Coprocessor number. Coprocessor
7575 9 is used for half-precision calculations or conversions. The format of the
7576 instruction is the same as the equivalent Coprocessor 10 instruction that
7577 exists for Single-Precision operation. */
7578
7579 static void
7580 do_scalar_fp16_v82_encode (void)
7581 {
7582 if (inst.cond != COND_ALWAYS)
7583 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7584 " the behaviour is UNPREDICTABLE"));
7585 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7586 _(BAD_FP16));
7587
7588 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7589 mark_feature_used (&arm_ext_fp16);
7590 }
7591
7592 /* If VAL can be encoded in the immediate field of an ARM instruction,
7593 return the encoded form. Otherwise, return FAIL. */
7594
7595 static unsigned int
7596 encode_arm_immediate (unsigned int val)
7597 {
7598 unsigned int a, i;
7599
7600 if (val <= 0xff)
7601 return val;
7602
7603 for (i = 2; i < 32; i += 2)
7604 if ((a = rotate_left (val, i)) <= 0xff)
7605 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7606
7607 return FAIL;
7608 }
7609
7610 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7611 return the encoded form. Otherwise, return FAIL. */
7612 static unsigned int
7613 encode_thumb32_immediate (unsigned int val)
7614 {
7615 unsigned int a, i;
7616
7617 if (val <= 0xff)
7618 return val;
7619
7620 for (i = 1; i <= 24; i++)
7621 {
7622 a = val >> i;
7623 if ((val & ~(0xff << i)) == 0)
7624 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7625 }
7626
7627 a = val & 0xff;
7628 if (val == ((a << 16) | a))
7629 return 0x100 | a;
7630 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7631 return 0x300 | a;
7632
7633 a = val & 0xff00;
7634 if (val == ((a << 16) | a))
7635 return 0x200 | (a >> 8);
7636
7637 return FAIL;
7638 }
7639 /* Encode a VFP SP or DP register number into inst.instruction. */
7640
7641 static void
7642 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7643 {
7644 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7645 && reg > 15)
7646 {
7647 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7648 {
7649 if (thumb_mode)
7650 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7651 fpu_vfp_ext_d32);
7652 else
7653 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7654 fpu_vfp_ext_d32);
7655 }
7656 else
7657 {
7658 first_error (_("D register out of range for selected VFP version"));
7659 return;
7660 }
7661 }
7662
7663 switch (pos)
7664 {
7665 case VFP_REG_Sd:
7666 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7667 break;
7668
7669 case VFP_REG_Sn:
7670 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7671 break;
7672
7673 case VFP_REG_Sm:
7674 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7675 break;
7676
7677 case VFP_REG_Dd:
7678 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7679 break;
7680
7681 case VFP_REG_Dn:
7682 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7683 break;
7684
7685 case VFP_REG_Dm:
7686 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7687 break;
7688
7689 default:
7690 abort ();
7691 }
7692 }
7693
7694 /* Encode a <shift> in an ARM-format instruction. The immediate,
7695 if any, is handled by md_apply_fix. */
7696 static void
7697 encode_arm_shift (int i)
7698 {
7699 /* register-shifted register. */
7700 if (inst.operands[i].immisreg)
7701 {
7702 int op_index;
7703 for (op_index = 0; op_index <= i; ++op_index)
7704 {
7705 /* Check the operand only when it's presented. In pre-UAL syntax,
7706 if the destination register is the same as the first operand, two
7707 register form of the instruction can be used. */
7708 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7709 && inst.operands[op_index].reg == REG_PC)
7710 as_warn (UNPRED_REG ("r15"));
7711 }
7712
7713 if (inst.operands[i].imm == REG_PC)
7714 as_warn (UNPRED_REG ("r15"));
7715 }
7716
7717 if (inst.operands[i].shift_kind == SHIFT_RRX)
7718 inst.instruction |= SHIFT_ROR << 5;
7719 else
7720 {
7721 inst.instruction |= inst.operands[i].shift_kind << 5;
7722 if (inst.operands[i].immisreg)
7723 {
7724 inst.instruction |= SHIFT_BY_REG;
7725 inst.instruction |= inst.operands[i].imm << 8;
7726 }
7727 else
7728 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
7729 }
7730 }
7731
7732 static void
7733 encode_arm_shifter_operand (int i)
7734 {
7735 if (inst.operands[i].isreg)
7736 {
7737 inst.instruction |= inst.operands[i].reg;
7738 encode_arm_shift (i);
7739 }
7740 else
7741 {
7742 inst.instruction |= INST_IMMEDIATE;
7743 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
7744 inst.instruction |= inst.operands[i].imm;
7745 }
7746 }
7747
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register, the P (pre-index) and W (write-back)
   bits for operand I, and warns if the base is also the transfer
   register with write-back in effect.  IS_T selects the user-mode
   (LDRT/STRT-style) forms, which only allow post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only sets postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      /* For the T forms the W bit requests the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when Rn (bits [19:16]) equals Rd/Rt (bits [15:12]) and the
     base will be written back (explicit W bit, or post-indexed).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7790
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  /* Capture this before the common code ORs bits into the insn.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is fixed up later.  */
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  /* Offset immediate is filled in by md_apply_fix.  */
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7850
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  /* Split 8-bit offset is filled in by md_apply_fix.  */
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7894
7895 /* Write immediate bits [7:0] to the following locations:
7896
7897 |28/24|23 19|18 16|15 4|3 0|
7898 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7899
7900 This function is used by VMOV/VMVN/VORR/VBIC. */
7901
7902 static void
7903 neon_write_immbits (unsigned immbits)
7904 {
7905 inst.instruction |= immbits & 0xf;
7906 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7907 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7908 }
7909
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7946
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7958
/* For immediate of above form, return 0bABCD: gather bit 0 of each
   byte of IMM into a four-bit value, lowest byte in bit 0.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1u) << byte;

  return squashed;
}
7967
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit followed by the top seven bits starting at bit 25.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_bits;
}
7975
7976 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7977 the instruction. *OP is passed as the initial value of the op field, and
7978 may be set to a different value depending on the constant (i.e.
7979 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7980 MVN). If the immediate looks like a repeated pattern then also
7981 try smaller element sizes. */
7982
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: only encodable at 32-bit size and
     only for MOV (*op == 0).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where every byte is 0x00 or 0xff: squash each half
	 to four bits and force the op field to 1 (this cmode uses OP = 1
	 even for MOV, hence the *op == 1 rejection first).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only representable if both halves
	 are identical; fall through to the 32-bit cases with one half.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One non-zero byte in a 32-bit word, or the 0x..FF / 0x..FFFF
	 "shifted ones" variants.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a smaller element size if the value is a repeated 16-bit
	 pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One non-zero byte in a 16-bit halfword.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try byte size if the value is a repeated 8-bit pattern.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8085
8086 #if defined BFD_HOST_64_BIT
8087 /* Returns TRUE if double precision value V may be cast
8088 to single precision without loss of accuracy. */
8089
8090 static bfd_boolean
8091 is_double_a_single (bfd_int64_t v)
8092 {
8093 int exp = (int)((v >> 52) & 0x7FF);
8094 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8095
8096 return (exp == 0 || exp == 0x7FF
8097 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8098 && (mantissa & 0x1FFFFFFFl) == 0;
8099 }
8100
8101 /* Returns a double precision value casted to single precision
8102 (ignoring the least significant bits in exponent and mantissa). */
8103
8104 static int
8105 double_to_single (bfd_int64_t v)
8106 {
8107 int sign = (int) ((v >> 63) & 1l);
8108 int exp = (int) ((v >> 52) & 0x7FF);
8109 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8110
8111 if (exp == 0x7FF)
8112 exp = 0xFF;
8113 else
8114 {
8115 exp = exp - 1023 + 127;
8116 if (exp >= 0xFF)
8117 {
8118 /* Infinity. */
8119 exp = 0x7F;
8120 mantissa = 0;
8121 }
8122 else if (exp < 0)
8123 {
8124 /* No denormalized numbers. */
8125 exp = 0;
8126 mantissa = 0;
8127 }
8128 }
8129 mantissa >>= 29;
8130 return (sign << 31) | (exp << 23) | mantissa;
8131 }
8132 #endif /* BFD_HOST_64_BIT */
8133
/* Kind of "=expr" literal load being expanded: a Thumb load, an ARM
   load, or a vector (coprocessor) load.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};
8140
8141 static void do_vfp_nsyn_opcode (const char *);
8142
8143 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8144 Determine whether it can be performed with a move instruction; if
8145 it can, convert inst.instruction to that move instruction and
8146 return TRUE; if it can't, convert inst.instruction to a literal-pool
8147 load and return FALSE. If this is not a valid thing to do in the
8148 current context, set inst.error and return TRUE.
8149
8150 inst.operands[i] describes the destination register. */
8151
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Pick the load bit to test: wide Thumb-2, narrow Thumb, or ARM.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* Only load instructions may take an "=expr" operand.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  /* For a known constant, first try to synthesize the value with a move
     immediate and avoid the literal pool altogether.  */
  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number == -1 marks a generic floating point number;
		 convert it to littlenum words first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the low littlenum words into a host integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN modified-immediate encoding, inverting
		 the value (and flipping op) if the direct form fails.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form matched: fall back to a literal-pool entry and rewrite
     the operand as a PC-relative load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8387
8388 /* inst.operands[i] was set up by parse_address. Encode it into an
8389 ARM-format instruction. Reject all forms which cannot be encoded
8390 into a coprocessor load/store instruction. If wb_ok is false,
8391 reject use of writeback; if unind_ok is false, reject use of
8392 unindexed addressing. If reloc_override is not 0, use it instead
8393 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8394 (in which case it is preserved). */
8395
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a non-register operand is only acceptable for a vector
	 destination, where it may become a vmov of a constant.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn goes in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries the option immediate directly.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Use the caller-supplied relocation if given; otherwise the default
     coprocessor offset relocation, unless a group relocation (or
     LDR_PC_G0) was already selected during parsing.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8464
8465 /* Functions for instruction encoding, sorted by sub-architecture.
8466 First some generics; their names are taken from the conventional
8467 bit positions for register arguments in ARM format instructions. */
8468
static void
do_noargs (void)
{
  /* The opcode table entry already contains the complete encoding;
     nothing further to insert.  */
}
8473
8474 static void
8475 do_rd (void)
8476 {
8477 inst.instruction |= inst.operands[0].reg << 12;
8478 }
8479
8480 static void
8481 do_rn (void)
8482 {
8483 inst.instruction |= inst.operands[0].reg << 16;
8484 }
8485
8486 static void
8487 do_rd_rm (void)
8488 {
8489 inst.instruction |= inst.operands[0].reg << 12;
8490 inst.instruction |= inst.operands[1].reg;
8491 }
8492
8493 static void
8494 do_rm_rn (void)
8495 {
8496 inst.instruction |= inst.operands[0].reg;
8497 inst.instruction |= inst.operands[1].reg << 16;
8498 }
8499
8500 static void
8501 do_rd_rn (void)
8502 {
8503 inst.instruction |= inst.operands[0].reg << 12;
8504 inst.instruction |= inst.operands[1].reg << 16;
8505 }
8506
8507 static void
8508 do_rn_rd (void)
8509 {
8510 inst.instruction |= inst.operands[0].reg << 16;
8511 inst.instruction |= inst.operands[1].reg << 12;
8512 }
8513
8514 static void
8515 do_tt (void)
8516 {
8517 inst.instruction |= inst.operands[0].reg << 8;
8518 inst.instruction |= inst.operands[1].reg << 16;
8519 }
8520
8521 static bfd_boolean
8522 check_obsolete (const arm_feature_set *feature, const char *msg)
8523 {
8524 if (ARM_CPU_IS_ANY (cpu_variant))
8525 {
8526 as_tsktsk ("%s", msg);
8527 return TRUE;
8528 }
8529 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8530 {
8531 as_bad ("%s", msg);
8532 return TRUE;
8533 }
8534
8535 return FALSE;
8536 }
8537
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  /* The mask clears the condition field and bit 22 (the byte/word bit),
     so this matches both SWP and SWPB under any condition.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd in bits 15:12, Rm in bits 3:0, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8561
8562 static void
8563 do_rd_rn_rm (void)
8564 {
8565 inst.instruction |= inst.operands[0].reg << 12;
8566 inst.instruction |= inst.operands[1].reg << 16;
8567 inst.instruction |= inst.operands[2].reg;
8568 }
8569
8570 static void
8571 do_rm_rd_rn (void)
8572 {
8573 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8574 constraint (((inst.relocs[0].exp.X_op != O_constant
8575 && inst.relocs[0].exp.X_op != O_illegal)
8576 || inst.relocs[0].exp.X_add_number != 0),
8577 BAD_ADDR_MODE);
8578 inst.instruction |= inst.operands[0].reg;
8579 inst.instruction |= inst.operands[1].reg << 12;
8580 inst.instruction |= inst.operands[2].reg << 16;
8581 }
8582
static void
do_imm0 (void)
{
  /* Insert the immediate operand, unshifted, at the low end of the
     instruction word.  */
  inst.instruction |= inst.operands[0].imm;
}
8588
8589 static void
8590 do_rd_cpaddr (void)
8591 {
8592 inst.instruction |= inst.operands[0].reg << 12;
8593 encode_arm_cp_address (1, TRUE, TRUE, 0);
8594 }
8595
8596 /* ARM instructions, in alphabetical order by function name (except
8597 that wrapper functions appear immediately after the function they
8598 wrap). */
8599
8600 /* This is a pseudo-op of the form "adr rd, label" to be converted
8601 into a relative address of the form "add rd, pc, #label-.-8". */
8602
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the pipeline: the PC reads 8 bytes ahead in ARM mode.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must set the low (Thumb) bit of the result.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8621
8622 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8623 into a relative address of the form:
8624 add rd, pc, #low(label-.-8)"
8625 add rd, rd, #high(label-.-8)" */
8626
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the pipeline: the PC reads 8 bytes ahead in ARM mode.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must set the low (Thumb) bit of the result.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8646
/* Data-processing: <op> Rd, {Rn,} <shifter_operand>.  Rn defaults to Rd
   when omitted.  */

static void
do_arit (void)
{
  /* Thumb-1-only group relocations cannot be used on an ARM insn.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8659
8660 static void
8661 do_barrier (void)
8662 {
8663 if (inst.operands[0].present)
8664 inst.instruction |= inst.operands[0].imm;
8665 else
8666 inst.instruction |= 0xf;
8667 }
8668
8669 static void
8670 do_bfc (void)
8671 {
8672 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8673 constraint (msb > 32, _("bit-field extends past end of register"));
8674 /* The instruction encoding stores the LSB and MSB,
8675 not the LSB and width. */
8676 inst.instruction |= inst.operands[0].reg << 12;
8677 inst.instruction |= inst.operands[1].imm << 7;
8678 inst.instruction |= (msb - 1) << 16;
8679 }
8680
8681 static void
8682 do_bfi (void)
8683 {
8684 unsigned int msb;
8685
8686 /* #0 in second position is alternative syntax for bfc, which is
8687 the same instruction but with REG_PC in the Rm field. */
8688 if (!inst.operands[1].isreg)
8689 inst.operands[1].reg = REG_PC;
8690
8691 msb = inst.operands[2].imm + inst.operands[3].imm;
8692 constraint (msb > 32, _("bit-field extends past end of register"));
8693 /* The instruction encoding stores the LSB and MSB,
8694 not the LSB and width. */
8695 inst.instruction |= inst.operands[0].reg << 12;
8696 inst.instruction |= inst.operands[1].reg;
8697 inst.instruction |= inst.operands[2].imm << 7;
8698 inst.instruction |= (msb - 1) << 16;
8699 }
8700
8701 static void
8702 do_bfx (void)
8703 {
8704 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8705 _("bit-field extends past end of register"));
8706 inst.instruction |= inst.operands[0].reg << 12;
8707 inst.instruction |= inst.operands[1].reg;
8708 inst.instruction |= inst.operands[2].imm << 7;
8709 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8710 }
8711
8712 /* ARM V5 breakpoint instruction (argument parse)
8713 BKPT <16 bit unsigned immediate>
8714 Instruction is not conditional.
8715 The bit pattern given in insns[] has the COND_ALWAYS condition,
8716 and it is an error if the caller tried to override that. */
8717
8718 static void
8719 do_bkpt (void)
8720 {
8721 /* Top 12 of 16 bits to bits 19:8. */
8722 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8723
8724 /* Bottom 4 of 16 bits to bits 3:0. */
8725 inst.instruction |= inst.operands[0].imm & 0xf;
8726 }
8727
/* Set up relocs[0] for a branch: honour an explicit '(plt)' or
   '(tlscall)' suffix on the operand, otherwise use DEFAULT_RELOC.
   Branch relocations are always PC-relative.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* The TLS call relocation differs between ARM and Thumb mode.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8744
static void
do_branch (void)
{
  /* EABI v4 and later uses the JUMP relocation so the linker can insert
     interworking veneers; older objects use the plain branch reloc.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8755
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* An unconditional BL is a call; a conditional one uses the JUMP
	 relocation instead.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8771
8772 /* ARM V5 branch-link-exchange instruction (argument parse)
8773 BLX <target_addr> ie BLX(1)
8774 BLX{<condition>} <Rm> ie BLX(2)
8775 Unfortunately, there are two different opcodes for this mnemonic.
8776 So, the insns[].value is not used, and the code here zaps values
8777 into inst.instruction.
8778 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8779
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) immediate form: opcode 0xfa000000.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8803
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* For non-ELF targets, or ELF objects with an EABI version before 4,
     the relocation is suppressed (on non-ELF the #ifdef leaves the
     assignment unconditional).  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
8828
8829
8830 /* ARM v5TEJ. Jump to Jazelle code. */
8831
8832 static void
8833 do_bxj (void)
8834 {
8835 if (inst.operands[0].reg == REG_PC)
8836 as_tsktsk (_("use of r15 in bxj is not really useful"));
8837
8838 inst.instruction |= inst.operands[0].reg;
8839 }
8840
8841 /* Co-processor data operation:
8842 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8843 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8844 static void
8845 do_cdp (void)
8846 {
8847 inst.instruction |= inst.operands[0].reg << 8;
8848 inst.instruction |= inst.operands[1].imm << 20;
8849 inst.instruction |= inst.operands[2].reg << 12;
8850 inst.instruction |= inst.operands[3].reg << 16;
8851 inst.instruction |= inst.operands[4].reg;
8852 inst.instruction |= inst.operands[5].imm << 5;
8853 }
8854
8855 static void
8856 do_cmp (void)
8857 {
8858 inst.instruction |= inst.operands[0].reg << 16;
8859 encode_arm_shifter_operand (1);
8860 }
8861
8862 /* Transfer between coprocessor and ARM registers.
8863 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8864 MRC2
8865 MCR{cond}
8866 MCR2
8867
8868 No special properties. */
8869
/* Description of one coprocessor register access that is deprecated or
   obsoleted on some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where it is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where it is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for the deprecated case.  */
  const char *obs_msg;		/* Diagnostic for the obsoleted case.  */
};
8882
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8910
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* MCR/MCR2 (opcode 0x{e,f}e000010) reject SP and PC outright;
	 MRC/MRC2 only reject SP, and only before ARMv8.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn about accesses to coprocessor registers that are deprecated on
     the selected architecture.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc 11:8, opcode_1 23:21, Rd 15:12, CRn 19:16, CRm 3:0,
     opcode_2 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8960
8961 /* Transfer between coprocessor register and pair of ARM registers.
8962 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8963 MCRR2
8964 MRRC{cond}
8965 MRRC2
8966
8967 Two XScale instructions are special cases of these:
8968
8969 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8970 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8971
8972 Result unpredictable if Rd or Rn is R15. */
8973
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* Thumb forbids SP and PC here; ARM forbids only PC.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc 11:8, opcode 7:4, Rd 15:12, Rn 19:16, CRm 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9007
9008 static void
9009 do_cpsi (void)
9010 {
9011 inst.instruction |= inst.operands[0].imm << 6;
9012 if (inst.operands[1].present)
9013 {
9014 inst.instruction |= CPSI_MMOD;
9015 inst.instruction |= inst.operands[1].imm;
9016 }
9017 }
9018
9019 static void
9020 do_dbg (void)
9021 {
9022 inst.instruction |= inst.operands[0].imm;
9023 }
9024
9025 static void
9026 do_div (void)
9027 {
9028 unsigned Rd, Rn, Rm;
9029
9030 Rd = inst.operands[0].reg;
9031 Rn = (inst.operands[1].present
9032 ? inst.operands[1].reg : Rd);
9033 Rm = inst.operands[2].reg;
9034
9035 constraint ((Rd == REG_PC), BAD_PC);
9036 constraint ((Rn == REG_PC), BAD_PC);
9037 constraint ((Rm == REG_PC), BAD_PC);
9038
9039 inst.instruction |= Rd << 16;
9040 inst.instruction |= Rn << 0;
9041 inst.instruction |= Rm << 8;
9042 }
9043
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low four opcode bits hold the then/else mask; the 0x10 bit
	 appears to act as a sentinel above the mask — TODO confirm
	 against the IT-state tracking code.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
9060
9061 /* If there is only one register in the register list,
9062 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns the 1-based index of the lowest set bit, or 0 when
     RANGE is empty.  Guard the empty case explicitly: the old form
     evaluated (1 << -1), which is undefined behavior.  */
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;
  /* Exactly one bit set iff RANGE equals that single bit.  */
  return (range == (1 << i)) ? i : -1;
}
9069
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  /* Base register in bits 19:16, register list in bits 15:0.  */
  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* "^" on the register list selects the user-register / exception-
     return variants.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as the single-register
	 STR/LDR form.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9125
/* Handler for the ldm/stm family of mnemonics.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9131
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* LR as the first register would make the pair LR/PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register is optional in the source; default it to the
     register following the first.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9173
9174 static void
9175 do_ldrex (void)
9176 {
9177 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9178 || inst.operands[1].postind || inst.operands[1].writeback
9179 || inst.operands[1].immisreg || inst.operands[1].shifted
9180 || inst.operands[1].negative
9181 /* This can arise if the programmer has written
9182 strex rN, rM, foo
9183 or if they have mistakenly used a register name as the last
9184 operand, eg:
9185 strex rN, rM, rX
9186 It is very difficult to distinguish between these two cases
9187 because "rX" might actually be a label. ie the register
9188 name has been occluded by a symbol of the same name. So we
9189 just generate a general 'bad addressing mode' type error
9190 message and leave it up to the programmer to discover the
9191 true cause and fix their mistake. */
9192 || (inst.operands[1].reg == REG_PC),
9193 BAD_ADDR_MODE);
9194
9195 constraint (inst.relocs[0].exp.X_op != O_constant
9196 || inst.relocs[0].exp.X_add_number != 0,
9197 _("offset must be zero in ARM encoding"));
9198
9199 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
9200
9201 inst.instruction |= inst.operands[0].reg << 12;
9202 inst.instruction |= inst.operands[1].reg << 16;
9203 inst.relocs[0].type = BFD_RELOC_UNUSED;
9204 }
9205
/* ARM LDREXD Rt, Rt2, [Rn]: Rt must be even, Rt2 (if given) must be
   Rt + 1, and Rt may not be LR (which would make Rt2 the PC).  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9221
/* In both ARM and thumb state 'ldr pc, #imm' with an immediate
   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  /* Only the literal/immediate form (not register-offset) with both
     destination and base being PC needs the alignment check.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9233
/* ARM LDR/STR word/byte: encode Rt, then either emit a literal-pool
   load for a bare immediate operand or encode addressing mode 2.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* =imm form: may be converted to a mov or a literal-pool load.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9244
/* ARM LDRT/STRT (user-mode translation) word/byte forms.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9263
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  /* These encodings place the transfer register in bits 12-15, so PC
     is rejected.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* =imm form: may be converted to a mov or a literal-pool load.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9276
/* ARMv4 LDRSBT/LDRHT/etc. user-mode translation forms (mode 3).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9295
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
static void
do_lstc (void)
{
  /* Coprocessor number goes in bits 8-11, CRd in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9305
/* ARM MLA/MLS Rd, Rm, Rs, Rn.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Rd: bits 16-19, Rm: bits 0-3, Rs: bits 8-11, Rn: bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9320
/* ARM MOV Rd, <shifter_operand>.  */
static void
do_mov (void)
{
  /* The :lower0_7:-style group relocations are Thumb-1 only.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9330
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (top) from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* No relocation needed: encode the constant directly.  */
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9352
/* Try to handle an MRS spelled in the VFP non-unified syntax.  Returns
   SUCCESS if the instruction was converted to fmstat/fmrx, FAIL if it
   is not a VFP form and should be handled as a core MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR may be the source; the whole
	 thing becomes an fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9371
/* Try to handle an MSR spelled in the VFP non-unified syntax.  Returns
   SUCCESS if converted to fmxr, FAIL if it should be a core MSR.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
9382
/* VMRS Rt, <spec_reg>: move from a VFP system register to a core
   register.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid destination in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9411
/* VMSR <spec_reg>, Rt: move from a core register to a VFP system
   register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* In Thumb state both SP and PC are rejected; in ARM state only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9435
/* Core MRS Rd, {C|S|A}PSR or MRS Rd, <banked_reg>.  Falls back to the
   VFP non-unified-syntax handler first.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): this looks like the banked-register encoding
	 check (bit 9 / 0xf in bits 16-19) — confirm against the parser
	 that produced operands[1].reg.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9464
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* PSR field mask (and SPSR bit) come straight from the parsed
     operand.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the encoding of the constant to the
	 fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9485
/* ARM MUL Rd, Rm {, Rs}.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* "mul Rd, Rm" is shorthand for "mul Rd, Rm, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9501
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* RdLo: bits 12-15, RdHi: bits 16-19, Rm: bits 0-3, Rs: bits 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9526
/* ARM NOP, optionally with a hint operand (v6K and later).  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9540
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* The LSL shift specifier is optional.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9555
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      /* Note the swapped Rn/Rm fields relative to the shifted form.  */
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9578
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

    PLD(W) <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only a pre-indexed, non-writeback address is architecturally
     valid for PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9599
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* Same addressing-mode restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI does not set the P bit.  */
  inst.instruction &= ~PRE_INDEX;
}
9615
/* PUSH {reglist} / POP {reglist}: rewrite as an LDM/STM with SP! as
   the base and hand off to the common encoder.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise the SP!
     base-register operand in slot 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9628
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9642
/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The saturate position is encoded as imm - 1 (SSAT takes 1-32).  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9655
/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, the USAT position (0-31) is encoded directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9668
/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Saturate position encoded as imm - 1, as for SSAT.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9678
9679 static void
9680 do_usat16 (void)
9681 {
9682 inst.instruction |= inst.operands[0].reg << 12;
9683 inst.instruction |= inst.operands[1].imm << 16;
9684 inst.instruction |= inst.operands[2].reg;
9685 }
9686
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Non-zero imm means BE: set the E bit in the encoding.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9703
/* ARM shift pseudo-ops (lsl/lsr/asr/ror): MOV Rd, Rm, <shift> Rs|#imm.
   "lsl Rd, Rs" is shorthand for "lsl Rd, Rd, Rs".  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by the fixup machinery.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9724
/* SMC #imm: the immediate is encoded by the SMC relocation.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9731
/* HVC #imm: the immediate is encoded by the HVC relocation.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9738
/* SWI/SVC #imm: the immediate is encoded by the SWI relocation.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9745
/* ARM SETPAN #imm: requires the PAN extension; the 1-bit immediate
   goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9754
/* Thumb SETPAN #imm: as do_setpan, but the immediate goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9763
9764 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9765 SMLAxy{cond} Rd,Rm,Rs,Rn
9766 SMLAWy{cond} Rd,Rm,Rs,Rn
9767 Error if any register is R15. */
9768
9769 static void
9770 do_smla (void)
9771 {
9772 inst.instruction |= inst.operands[0].reg << 16;
9773 inst.instruction |= inst.operands[1].reg;
9774 inst.instruction |= inst.operands[2].reg << 8;
9775 inst.instruction |= inst.operands[3].reg << 12;
9776 }
9777
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9794
9795 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9796 SMULxy{cond} Rd,Rm,Rs
9797 Error if any register is R15. */
9798
9799 static void
9800 do_smul (void)
9801 {
9802 inst.instruction |= inst.operands[0].reg << 16;
9803 inst.instruction |= inst.operands[1].reg;
9804 inst.instruction |= inst.operands[2].reg << 8;
9805 }
9806
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; any explicit
     base must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  /* Writeback may be written on either the base register or the mode
     operand.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9828
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* Only a plain register base with zero offset is valid; see the
     comment in do_ldrex () for why a register name in the last operand
     also lands here.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9854
/* Thumb STREXB/STREXH: validate the addressing mode and operand
   overlap, then use the common Rm/Rd/Rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9869
/* ARM STREXD Rd, Rt, Rt2, [Rn]: Rt must be even, Rt2 (if given) must
   be Rt + 1, and Rd must not overlap the transfer pair or the base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9891
/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9901
/* Thumb V8 STLEX: same overlap check as the ARM form, but the Thumb
   field order is Rm/Rd/Rn.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9910
9911 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9912 extends it to 32-bits, and adds the result to a value in another
9913 register. You can specify a rotation by 0, 8, 16, or 24 bits
9914 before extracting the 16-bit value.
9915 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9916 Condition defaults to COND_ALWAYS.
9917 Error if any register uses R15. */
9918
9919 static void
9920 do_sxtah (void)
9921 {
9922 inst.instruction |= inst.operands[0].reg << 12;
9923 inst.instruction |= inst.operands[1].reg << 16;
9924 inst.instruction |= inst.operands[2].reg;
9925 inst.instruction |= inst.operands[3].imm << 10;
9926 }
9927
9928 /* ARM V6 SXTH.
9929
9930 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9931 Condition defaults to COND_ALWAYS.
9932 Error if any register uses R15. */
9933
9934 static void
9935 do_sxth (void)
9936 {
9937 inst.instruction |= inst.operands[0].reg << 12;
9938 inst.instruction |= inst.operands[1].reg;
9939 inst.instruction |= inst.operands[2].imm << 10;
9940 }
9941 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

static void
do_vfp_sp_monadic (void)
{
  /* Single-precision unary op: encode Sd and Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9951
/* Single-precision binary op: encode Sd, Sn and Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9959
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9965
/* Conversion with double-precision destination and single-precision
   source: encode Dd and Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9972
/* Conversion with single-precision destination and double-precision
   source: encode Sd and Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9979
/* Move from single-precision VFP register to core register
   (e.g. fmrs): Rt in bits 12-15, Sn in the VFP field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9986
/* Move a pair of single-precision registers to two core registers
   (e.g. fmrrs): the SP register list must contain exactly two.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9996
/* Move from core register to single-precision VFP register
   (e.g. fmsr): Sn in the VFP field, Rt in bits 12-15.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10003
/* Move two core registers to a pair of single-precision registers
   (e.g. fmsrr): the SP register list must contain exactly two.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10013
/* Single-precision VFP load/store (flds/fsts): Sd plus a coprocessor
   address operand.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10020
/* Double-precision VFP load/store (fldd/fstd): Dd plus a coprocessor
   address operand.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10027
10028
/* Common encoder for single-precision VFP load/store multiple.
   LDSTM_TYPE selects the addressing variant.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA-without-writeback form may omit '!'.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Register count goes in the low byte.  */
  inst.instruction |= inst.operands[1].imm;
}
10041
/* Common encoder for double-precision VFP load/store multiple.
   LDSTM_TYPE selects the addressing variant; the X variants use the
   FLDMX/FSTMX (odd word count) encoding.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X forms add one extra word.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10062
/* FLDMIAS/FSTMIAS: single-precision load/store multiple, IA.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10068
/* FLDMDBS/FSTMDBS: single-precision load/store multiple, DB.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10074
/* FLDMIAD/FSTMIAD: double-precision load/store multiple, IA.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10080
/* FLDMDBD/FSTMDBD: double-precision load/store multiple, DB.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10086
/* FLDMIAX/FSTMIAX: extended-precision load/store multiple, IA.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10092
/* FLDMDBX/FSTMDBX: extended-precision load/store multiple, DB.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10098
/* Double-precision two-operand form: encode Dd and Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10105
/* Double-precision form with operand 0 in the Dn field and operand 1
   in the Dd field.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10112
/* Double-precision form with operand 0 in the Dd field and operand 1
   in the Dn field.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10119
10120 static void
10121 do_vfp_dp_rd_rn_rm (void)
10122 {
10123 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10124 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
10125 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
10126 }
10127
/* Double-precision single-operand form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10133
/* Double-precision form with operands 0, 1 and 2 in the Dm, Dd and Dn
   fields respectively.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10141
/* VFPv3 instructions.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  /* The 8-bit modified immediate is split: high nibble into bits
     16-19, low nibble into bits 0-3.  */
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10150
/* VFPv3 fconstd: Dd plus the split 8-bit modified immediate.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10158
/* Encode the fraction-bits field for a VFPv3 fixed-point conversion.
   SRCSIZE is the fixed-point operand width (16 or 32); the encoded
   value is SRCSIZE minus the #fbits operand, split across bit 5 (low
   bit) and bits 0-3 (high bits).  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10182
10183 static void
10184 do_vfp_sp_conv_16 (void)
10185 {
10186 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10187 vfp_conv (16);
10188 }
10189
10190 static void
10191 do_vfp_dp_conv_16 (void)
10192 {
10193 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10194 vfp_conv (16);
10195 }
10196
10197 static void
10198 do_vfp_sp_conv_32 (void)
10199 {
10200 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10201 vfp_conv (32);
10202 }
10203
10204 static void
10205 do_vfp_dp_conv_32 (void)
10206 {
10207 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10208 vfp_conv (32);
10209 }
10210 \f
10211 /* FPA instructions. Also in a logical order. */
10212
10213 static void
10214 do_fpa_cmp (void)
10215 {
10216 inst.instruction |= inst.operands[0].reg << 16;
10217 inst.instruction |= inst.operands[1].reg;
10218 }
10219
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM multiple load/store: encode the first float register
     in bits 12-14, the transfer count in the split CP_T_X/CP_T_Y
     bits, then fix up the addressing mode.  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    /* A count of 4 is encoded as both bits clear.  */
    case 1: inst.instruction |= CP_T_X;	   break;
    case 2: inst.instruction |= CP_T_Y;	   break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 				   break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer occupies 12 bytes, so the emulated
	 stack adjustment is 12 * count.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending stacks move the offset downwards.  */
      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Empty-stack forms with writeback become post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10258 \f
10259 /* iWMMXt instructions: strictly in alphabetical order. */
10260
10261 static void
10262 do_iwmmxt_tandorc (void)
10263 {
10264 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
10265 }
10266
10267 static void
10268 do_iwmmxt_textrc (void)
10269 {
10270 inst.instruction |= inst.operands[0].reg << 12;
10271 inst.instruction |= inst.operands[1].imm;
10272 }
10273
10274 static void
10275 do_iwmmxt_textrm (void)
10276 {
10277 inst.instruction |= inst.operands[0].reg << 12;
10278 inst.instruction |= inst.operands[1].reg << 16;
10279 inst.instruction |= inst.operands[2].imm;
10280 }
10281
10282 static void
10283 do_iwmmxt_tinsr (void)
10284 {
10285 inst.instruction |= inst.operands[0].reg << 16;
10286 inst.instruction |= inst.operands[1].reg << 12;
10287 inst.instruction |= inst.operands[2].imm;
10288 }
10289
10290 static void
10291 do_iwmmxt_tmia (void)
10292 {
10293 inst.instruction |= inst.operands[0].reg << 5;
10294 inst.instruction |= inst.operands[1].reg;
10295 inst.instruction |= inst.operands[2].reg << 12;
10296 }
10297
10298 static void
10299 do_iwmmxt_waligni (void)
10300 {
10301 inst.instruction |= inst.operands[0].reg << 12;
10302 inst.instruction |= inst.operands[1].reg << 16;
10303 inst.instruction |= inst.operands[2].reg;
10304 inst.instruction |= inst.operands[3].imm << 20;
10305 }
10306
10307 static void
10308 do_iwmmxt_wmerge (void)
10309 {
10310 inst.instruction |= inst.operands[0].reg << 12;
10311 inst.instruction |= inst.operands[1].reg << 16;
10312 inst.instruction |= inst.operands[2].reg;
10313 inst.instruction |= inst.operands[3].imm << 21;
10314 }
10315
10316 static void
10317 do_iwmmxt_wmov (void)
10318 {
10319 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10320 inst.instruction |= inst.operands[0].reg << 12;
10321 inst.instruction |= inst.operands[1].reg << 16;
10322 inst.instruction |= inst.operands[1].reg;
10323 }
10324
10325 static void
10326 do_iwmmxt_wldstbh (void)
10327 {
10328 int reloc;
10329 inst.instruction |= inst.operands[0].reg << 12;
10330 if (thumb_mode)
10331 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10332 else
10333 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10334 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10335 }
10336
10337 static void
10338 do_iwmmxt_wldstw (void)
10339 {
10340 /* RIWR_RIWC clears .isreg for a control register. */
10341 if (!inst.operands[0].isreg)
10342 {
10343 constraint (inst.cond != COND_ALWAYS, BAD_COND);
10344 inst.instruction |= 0xf0000000;
10345 }
10346
10347 inst.instruction |= inst.operands[0].reg << 12;
10348 encode_arm_cp_address (1, TRUE, TRUE, 0);
10349 }
10350
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD.  On iWMMXt2 a register-offset addressing form is
     available; it uses a different (unconditional) encoding that is
     patched together here rather than going through the generic
     coprocessor address encoder.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields reused below, then force the 0xF
	 (unconditional) condition.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Any constant part of the offset lands at bit 4; the index
	 register number goes in the low bits.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10373
10374 static void
10375 do_iwmmxt_wshufh (void)
10376 {
10377 inst.instruction |= inst.operands[0].reg << 12;
10378 inst.instruction |= inst.operands[1].reg << 16;
10379 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10380 inst.instruction |= (inst.operands[2].imm & 0x0f);
10381 }
10382
10383 static void
10384 do_iwmmxt_wzero (void)
10385 {
10386 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10387 inst.instruction |= inst.operands[0].reg;
10388 inst.instruction |= inst.operands[0].reg << 12;
10389 inst.instruction |= inst.operands[0].reg << 16;
10390 }
10391
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift/rotate instructions that take either a third register or,
     on iWMMXt2, a 5-bit immediate.  A zero immediate is not encodable
     directly, so it is rewritten into an equivalent instruction.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Dispatch on the operation/size field in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding: immediate bit 4 lands at bit 8, bits
       0-3 in the low nibble.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10441 \f
10442 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10443 operations first, then control, shift, and load/store. */
10444
10445 /* Insns like "foo X,Y,Z". */
10446
10447 static void
10448 do_mav_triple (void)
10449 {
10450 inst.instruction |= inst.operands[0].reg << 16;
10451 inst.instruction |= inst.operands[1].reg;
10452 inst.instruction |= inst.operands[2].reg << 12;
10453 }
10454
10455 /* Insns like "foo W,X,Y,Z".
10456 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10457
10458 static void
10459 do_mav_quad (void)
10460 {
10461 inst.instruction |= inst.operands[0].reg << 5;
10462 inst.instruction |= inst.operands[1].reg << 12;
10463 inst.instruction |= inst.operands[2].reg << 16;
10464 inst.instruction |= inst.operands[3].reg;
10465 }
10466
10467 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10468 static void
10469 do_mav_dspsc (void)
10470 {
10471 inst.instruction |= inst.operands[1].reg << 12;
10472 }
10473
10474 /* Maverick shift immediate instructions.
10475 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10476 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10477
10478 static void
10479 do_mav_shift (void)
10480 {
10481 int imm = inst.operands[2].imm;
10482
10483 inst.instruction |= inst.operands[0].reg << 12;
10484 inst.instruction |= inst.operands[1].reg << 16;
10485
10486 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10487 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10488 Bit 4 should be 0. */
10489 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10490
10491 inst.instruction |= imm;
10492 }
10493 \f
10494 /* XScale instructions. Also sorted arithmetic before move. */
10495
10496 /* Xscale multiply-accumulate (argument parse)
10497 MIAcc acc0,Rm,Rs
10498 MIAPHcc acc0,Rm,Rs
10499 MIAxycc acc0,Rm,Rs. */
10500
10501 static void
10502 do_xsc_mia (void)
10503 {
10504 inst.instruction |= inst.operands[1].reg;
10505 inst.instruction |= inst.operands[2].reg << 12;
10506 }
10507
10508 /* Xscale move-accumulator-register (argument parse)
10509
10510 MARcc acc0,RdLo,RdHi. */
10511
10512 static void
10513 do_xsc_mar (void)
10514 {
10515 inst.instruction |= inst.operands[1].reg << 12;
10516 inst.instruction |= inst.operands[2].reg << 16;
10517 }
10518
10519 /* Xscale move-register-accumulator (argument parse)
10520
10521 MRAcc RdLo,RdHi,acc0. */
10522
10523 static void
10524 do_xsc_mra (void)
10525 {
10526 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10527 inst.instruction |= inst.operands[0].reg << 12;
10528 inst.instruction |= inst.operands[1].reg << 16;
10529 }
10530 \f
10531 /* Encoding functions relevant only to Thumb. */
10532
10533 /* inst.operands[i] is a shifted-register operand; encode
10534 it into inst.instruction in the format used by Thumb32. */
10535
10536 static void
10537 encode_thumb32_shifted_operand (int i)
10538 {
10539 unsigned int value = inst.relocs[0].exp.X_add_number;
10540 unsigned int shift = inst.operands[i].shift_kind;
10541
10542 constraint (inst.operands[i].immisreg,
10543 _("shift by register not allowed in thumb mode"));
10544 inst.instruction |= inst.operands[i].reg;
10545 if (shift == SHIFT_RRX)
10546 inst.instruction |= SHIFT_ROR << 4;
10547 else
10548 {
10549 constraint (inst.relocs[0].exp.X_op != O_constant,
10550 _("expression too complex"));
10551
10552 constraint (value > 32
10553 || (value == 32 && (shift == SHIFT_LSL
10554 || shift == SHIFT_ROR)),
10555 _("shift expression is too large"));
10556
10557 if (value == 0)
10558 shift = SHIFT_LSL;
10559 else if (value == 32)
10560 value = 0;
10561
10562 inst.instruction |= shift << 4;
10563 inst.instruction |= (value & 0x1c) << 10;
10564 inst.instruction |= (value & 0x03) << 6;
10565 }
10566 }
10567
10568
10569 /* inst.operands[i] was set up by parse_address. Encode it into a
10570 Thumb32 format load or store instruction. Reject forms that cannot
10571 be used with such instructions. If is_t is true, reject forms that
10572 cannot be used with a T instruction; if is_d is true, reject forms
10573 that cannot be used with a D instruction. If it is a store insn,
10574 reject PC in Rn. */
10575
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  /* Encode operand I (set up by parse_address) as a Thumb32 load/store
     address; IS_T and IS_D restrict to the T- and D-form rules.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is representable (bits 4-5).  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed or plain offset form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10647
10648 /* Table of Thumb instructions which exist in both 16- and 32-bit
10649 encodings (the latter only in post-V6T2 cores). The index is the
10650 value used in the insns table below. When there is more than one
10651 possible 16-bit encoding for the instruction, this table always
10652 holds variant (1).
10653 Also contains several pseudo-instructions used during relaxation. */
/* Each X entry pairs a 16-bit encoding (second field) with the
   corresponding 32-bit encoding (third field); ffffffff marks "no
   32-bit form".  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Table of the 16-bit encodings, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Table of the 32-bit encodings, indexed the same way.  Bit 20 of a
   32-bit encoding is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10761
10762 /* Thumb instruction encoders, in alphabetical order. */
10763
10764 /* ADDW or SUBW. */
10765
10766 static void
10767 do_t_add_sub_w (void)
10768 {
10769 int Rd, Rn;
10770
10771 Rd = inst.operands[0].reg;
10772 Rn = inst.operands[1].reg;
10773
10774 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10775 is the SP-{plus,minus}-immediate form of the instruction. */
10776 if (Rn == REG_SP)
10777 constraint (Rd == REG_PC, BAD_PC);
10778 else
10779 reject_bad_reg (Rd);
10780
10781 inst.instruction |= (Rn << 16) | (Rd << 8);
10782 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
10783 }
10784
10785 /* Parse an add or subtract instruction. We get here with inst.instruction
10786 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10787
static void
do_t_add_sub (void)
{
  /* Assemble Thumb ADD/ADDS/SUB/SUBS in all their immediate and
     register forms, choosing a 16-bit encoding (possibly with
     relaxation) whenever one exists and is permitted.  */
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* Writing the PC ends an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting variants can be narrow outside an IT block;
	 non-flag-setting variants only inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Group relocations force the 16-bit form; anything
		     else either gets a Thumb ADD reloc (forced 16-bit)
		     or is left to relax to 32 bits.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Emit a 32-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* The only PC-destination form is the exception
		     return SUBS PC, LR, #imm8.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted-register) operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source is
			 the one in the Rm field.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the 16-bit encodings are
	 available, and the S-suffixed mnemonics are rejected.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11008
static void
do_t_adr (void)
{
  /* Assemble Thumb ADR, choosing between a relaxable 16-bit form, a
     32-bit form, or a forced 16-bit form.  */
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* A defined Thumb function symbol gets its low bit set so the
     computed address is a valid interworking target.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11048
11049 /* Arithmetic instructions for which there is just one 16-bit
11050 instruction encoding, and it allows only two low registers.
11051 For maximal compatibility with ARM syntax, we allow three register
11052 operands even when Thumb-32 instructions are not available, as long
11053 as the first two are identical. For instance, both "sbc r0,r1" and
11054 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  /* Non-commutative three-operand Thumb arithmetic: the 16-bit form
     exists only as "op Rd, Rn" with Rd doubling as first source.  */
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form requires the destination to equal the
	     first source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11137
11138 /* Similarly, but for instructions where the arithmetic operation is
11139 commutative, so we can allow either of them to be different from
11140 the destination operand in a 16-bit instruction. For instance, all
11141 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11142 accepted. */
static void
do_t_arit3c (void)
{
  /* Like do_t_arit3, but the operation is commutative, so the 16-bit
     form can be used when the destination matches either source.  */
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutativity: either source may share the Rd slot.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11238
11239 static void
11240 do_t_bfc (void)
11241 {
11242 unsigned Rd;
11243 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11244 constraint (msb > 32, _("bit-field extends past end of register"));
11245 /* The instruction encoding stores the LSB and MSB,
11246 not the LSB and width. */
11247 Rd = inst.operands[0].reg;
11248 reject_bad_reg (Rd);
11249 inst.instruction |= Rd << 8;
11250 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11251 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11252 inst.instruction |= msb - 1;
11253 }
11254
11255 static void
11256 do_t_bfi (void)
11257 {
11258 int Rd, Rn;
11259 unsigned int msb;
11260
11261 Rd = inst.operands[0].reg;
11262 reject_bad_reg (Rd);
11263
11264 /* #0 in second position is alternative syntax for bfc, which is
11265 the same instruction but with REG_PC in the Rm field. */
11266 if (!inst.operands[1].isreg)
11267 Rn = REG_PC;
11268 else
11269 {
11270 Rn = inst.operands[1].reg;
11271 reject_bad_reg (Rn);
11272 }
11273
11274 msb = inst.operands[2].imm + inst.operands[3].imm;
11275 constraint (msb > 32, _("bit-field extends past end of register"));
11276 /* The instruction encoding stores the LSB and MSB,
11277 not the LSB and width. */
11278 inst.instruction |= Rd << 8;
11279 inst.instruction |= Rn << 16;
11280 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11281 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11282 inst.instruction |= msb - 1;
11283 }
11284
11285 static void
11286 do_t_bfx (void)
11287 {
11288 unsigned Rd, Rn;
11289
11290 Rd = inst.operands[0].reg;
11291 Rn = inst.operands[1].reg;
11292
11293 reject_bad_reg (Rd);
11294 reject_bad_reg (Rn);
11295
11296 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11297 _("bit-field extends past end of register"));
11298 inst.instruction |= Rd << 8;
11299 inst.instruction |= Rn << 16;
11300 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11301 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11302 inst.instruction |= inst.operands[3].imm - 1;
11303 }
11304
11305 /* ARM V5 Thumb BLX (argument parse)
11306 BLX <target_addr> which is BLX(1)
11307 BLX <Rm> which is BLX(2)
11308 Unfortunately, there are two different opcodes for this mnemonic.
11309 So, the insns[].value is not used, and the code here zaps values
11310 into inst.instruction.
11311
11312 ??? How to take advantage of the additional two bits of displacement
11313 available in Thumb32 mode? Need new relocation? */
11314
11315 static void
11316 do_t_blx (void)
11317 {
11318 set_it_insn_type_last ();
11319
11320 if (inst.operands[0].isreg)
11321 {
11322 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11323 /* We have a register, so this is BLX(2). */
11324 inst.instruction |= inst.operands[0].reg << 3;
11325 }
11326 else
11327 {
11328 /* No register. This must be BLX(1). */
11329 inst.instruction = 0xf000e800;
11330 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11331 }
11332 }
11333
/* Thumb B/Bcond (argument parse).  Chooses between the 16-bit and
   32-bit branch encodings and selects the matching PC-relative
   relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the 32-bit form when requested with ".w", or when no size was
     requested and the target is a known constant or carries a reloc
     (so relaxation cannot later pick the narrow form).  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
11395
11396 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11397 between the two is the maximum immediate allowed - which is passed in
11398 RANGE. */
11399 static void
11400 do_t_bkpt_hlt1 (int range)
11401 {
11402 constraint (inst.cond != COND_ALWAYS,
11403 _("instruction is always unconditional"));
11404 if (inst.operands[0].present)
11405 {
11406 constraint (inst.operands[0].imm > range,
11407 _("immediate value out of range"));
11408 inst.instruction |= inst.operands[0].imm;
11409 }
11410
11411 set_it_insn_type (NEUTRAL_IT_INSN);
11412 }
11413
/* Thumb HLT: 6-bit immediate (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11419
/* Thumb BKPT: 8-bit immediate (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11425
/* Thumb BL-style branch with a 23-bit PC-relative displacement
   (argument parse).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
11453
/* Thumb BX (argument parse): encode Rm into bits 3-6.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11463
11464 static void
11465 do_t_bxj (void)
11466 {
11467 int Rm;
11468
11469 set_it_insn_type_last ();
11470 Rm = inst.operands[0].reg;
11471 reject_bad_reg (Rm);
11472 inst.instruction |= Rm << 16;
11473 }
11474
11475 static void
11476 do_t_clz (void)
11477 {
11478 unsigned Rd;
11479 unsigned Rm;
11480
11481 Rd = inst.operands[0].reg;
11482 Rm = inst.operands[1].reg;
11483
11484 reject_bad_reg (Rd);
11485 reject_bad_reg (Rm);
11486
11487 inst.instruction |= Rd << 8;
11488 inst.instruction |= Rm << 16;
11489 inst.instruction |= Rm;
11490 }
11491
/* Thumb CSDB: no operands; only constrains IT-block placement.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11497
/* Thumb CPS: OR the parsed immediate straight into the opcode.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11504
11505 static void
11506 do_t_cpsi (void)
11507 {
11508 set_it_insn_type (OUTSIDE_IT_INSN);
11509 if (unified_syntax
11510 && (inst.operands[1].present || inst.size_req == 4)
11511 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11512 {
11513 unsigned int imod = (inst.instruction & 0x0030) >> 4;
11514 inst.instruction = 0xf3af8000;
11515 inst.instruction |= imod << 9;
11516 inst.instruction |= inst.operands[0].imm << 5;
11517 if (inst.operands[1].present)
11518 inst.instruction |= 0x100 | inst.operands[1].imm;
11519 }
11520 else
11521 {
11522 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11523 && (inst.operands[0].imm & 4),
11524 _("selected processor does not support 'A' form "
11525 "of this instruction"));
11526 constraint (inst.operands[1].present || inst.size_req == 4,
11527 _("Thumb does not support the 2-argument "
11528 "form of this instruction"));
11529 inst.instruction |= inst.operands[0].imm;
11530 }
11531 }
11532
11533 /* THUMB CPY instruction (argument parse). */
11534
11535 static void
11536 do_t_cpy (void)
11537 {
11538 if (inst.size_req == 4)
11539 {
11540 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11541 inst.instruction |= inst.operands[0].reg << 8;
11542 inst.instruction |= inst.operands[1].reg;
11543 }
11544 else
11545 {
11546 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11547 inst.instruction |= (inst.operands[0].reg & 0x7);
11548 inst.instruction |= inst.operands[1].reg << 3;
11549 }
11550 }
11551
11552 static void
11553 do_t_cbz (void)
11554 {
11555 set_it_insn_type (OUTSIDE_IT_INSN);
11556 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11557 inst.instruction |= inst.operands[0].reg;
11558 inst.relocs[0].pc_rel = 1;
11559 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11560 }
11561
/* Thumb DBG hint: OR the option immediate into the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11567
11568 static void
11569 do_t_div (void)
11570 {
11571 unsigned Rd, Rn, Rm;
11572
11573 Rd = inst.operands[0].reg;
11574 Rn = (inst.operands[1].present
11575 ? inst.operands[1].reg : Rd);
11576 Rm = inst.operands[2].reg;
11577
11578 reject_bad_reg (Rd);
11579 reject_bad_reg (Rn);
11580 reject_bad_reg (Rm);
11581
11582 inst.instruction |= Rd << 8;
11583 inst.instruction |= Rn << 16;
11584 inst.instruction |= Rm;
11585 }
11586
11587 static void
11588 do_t_hint (void)
11589 {
11590 if (unified_syntax && inst.size_req == 4)
11591 inst.instruction = THUMB_OP32 (inst.instruction);
11592 else
11593 inst.instruction = THUMB_OP16 (inst.instruction);
11594 }
11595
/* Thumb IT instruction (argument parse).  Records the IT state in
   now_it and fixes up the mask for negated conditions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Mask with a trailing 1 appended so block length can be recovered
     from the position of the lowest set bit.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.
     The number of trailing zero bits in the parsed mask determines the
     block length; each non-terminator mask bit is flipped.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11638
/* Helper function used for both push/pop and ldm/stm.
   DO_IO is false when only the register mask (no base register) is to
   be encoded.  BASE is the base register (must be valid when DO_IO),
   MASK the register list, WRITEBACK whether the base is updated.
   Errors are reported by setting inst.error; a single-register
   transfer is rewritten as a plain str/ldr.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the opcode distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; must be last in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* (mask & (mask - 1)) == 0 means exactly one register listed.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Convert the mask to a register number for the Rt field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11706
/* Thumb LDM/STM (argument parse).  Tries the 16-bit ldmia/stmia,
   push/pop or single-register str/ldr forms before falling back to the
   32-bit encoding via encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP base: use push/pop with writeback, or SP-relative
		 str/ldr for a single register without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11835
/* Thumb LDREX (argument parse).  Only a plain [Rn, #imm] addressing
   mode is accepted.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Offset is an unsigned 8-bit quantity, resolved by md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11851
/* Thumb LDREXD (argument parse).  When Rt2 is omitted it defaults to
   Rt+1.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
11869
/* Thumb single-register load/store (argument parse).  Handles literal
   pool references, selection between 16- and 32-bit encodings, and
   relaxation bookkeeping.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load into PC is a branch: must be the last insn of an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/symbol operand: try to materialize it via a mov
	     or a literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms have dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Leave the offset unresolved so relaxation can widen
		   the insn later if required.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Below this point: pre-unified (classic Thumb) syntax only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset opcode into its register-offset
     counterpart.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12056
/* Thumb LDRD/STRD (argument parse).  A missing Rt2 defaults to Rt+1.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12079
/* Thumb LDRT/STRT-family (unprivileged load/store, argument parse).  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12086
12087 static void
12088 do_t_mla (void)
12089 {
12090 unsigned Rd, Rn, Rm, Ra;
12091
12092 Rd = inst.operands[0].reg;
12093 Rn = inst.operands[1].reg;
12094 Rm = inst.operands[2].reg;
12095 Ra = inst.operands[3].reg;
12096
12097 reject_bad_reg (Rd);
12098 reject_bad_reg (Rn);
12099 reject_bad_reg (Rm);
12100 reject_bad_reg (Ra);
12101
12102 inst.instruction |= Rd << 8;
12103 inst.instruction |= Rn << 16;
12104 inst.instruction |= Rm;
12105 inst.instruction |= Ra << 12;
12106 }
12107
12108 static void
12109 do_t_mlal (void)
12110 {
12111 unsigned RdLo, RdHi, Rn, Rm;
12112
12113 RdLo = inst.operands[0].reg;
12114 RdHi = inst.operands[1].reg;
12115 Rn = inst.operands[2].reg;
12116 Rm = inst.operands[3].reg;
12117
12118 reject_bad_reg (RdLo);
12119 reject_bad_reg (RdHi);
12120 reject_bad_reg (Rn);
12121 reject_bad_reg (Rm);
12122
12123 inst.instruction |= RdLo << 12;
12124 inst.instruction |= RdHi << 8;
12125 inst.instruction |= Rn << 16;
12126 inst.instruction |= Rm;
12127 }
12128
12129 static void
12130 do_t_mov_cmp (void)
12131 {
12132 unsigned Rn, Rm;
12133
12134 Rn = inst.operands[0].reg;
12135 Rm = inst.operands[1].reg;
12136
12137 if (Rn == REG_PC)
12138 set_it_insn_type_last ();
12139
12140 if (unified_syntax)
12141 {
12142 int r0off = (inst.instruction == T_MNEM_mov
12143 || inst.instruction == T_MNEM_movs) ? 8 : 16;
12144 unsigned long opcode;
12145 bfd_boolean narrow;
12146 bfd_boolean low_regs;
12147
12148 low_regs = (Rn <= 7 && Rm <= 7);
12149 opcode = inst.instruction;
12150 if (in_it_block ())
12151 narrow = opcode != T_MNEM_movs;
12152 else
12153 narrow = opcode != T_MNEM_movs || low_regs;
12154 if (inst.size_req == 4
12155 || inst.operands[1].shifted)
12156 narrow = FALSE;
12157
12158 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12159 if (opcode == T_MNEM_movs && inst.operands[1].isreg
12160 && !inst.operands[1].shifted
12161 && Rn == REG_PC
12162 && Rm == REG_LR)
12163 {
12164 inst.instruction = T2_SUBS_PC_LR;
12165 return;
12166 }
12167
12168 if (opcode == T_MNEM_cmp)
12169 {
12170 constraint (Rn == REG_PC, BAD_PC);
12171 if (narrow)
12172 {
12173 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12174 but valid. */
12175 warn_deprecated_sp (Rm);
12176 /* R15 was documented as a valid choice for Rm in ARMv6,
12177 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12178 tools reject R15, so we do too. */
12179 constraint (Rm == REG_PC, BAD_PC);
12180 }
12181 else
12182 reject_bad_reg (Rm);
12183 }
12184 else if (opcode == T_MNEM_mov
12185 || opcode == T_MNEM_movs)
12186 {
12187 if (inst.operands[1].isreg)
12188 {
12189 if (opcode == T_MNEM_movs)
12190 {
12191 reject_bad_reg (Rn);
12192 reject_bad_reg (Rm);
12193 }
12194 else if (narrow)
12195 {
12196 /* This is mov.n. */
12197 if ((Rn == REG_SP || Rn == REG_PC)
12198 && (Rm == REG_SP || Rm == REG_PC))
12199 {
12200 as_tsktsk (_("Use of r%u as a source register is "
12201 "deprecated when r%u is the destination "
12202 "register."), Rm, Rn);
12203 }
12204 }
12205 else
12206 {
12207 /* This is mov.w. */
12208 constraint (Rn == REG_PC, BAD_PC);
12209 constraint (Rm == REG_PC, BAD_PC);
12210 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12211 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
12212 }
12213 }
12214 else
12215 reject_bad_reg (Rn);
12216 }
12217
12218 if (!inst.operands[1].isreg)
12219 {
12220 /* Immediate operand. */
12221 if (!in_it_block () && opcode == T_MNEM_mov)
12222 narrow = 0;
12223 if (low_regs && narrow)
12224 {
12225 inst.instruction = THUMB_OP16 (opcode);
12226 inst.instruction |= Rn << 8;
12227 if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12228 || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
12229 {
12230 if (inst.size_req == 2)
12231 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12232 else
12233 inst.relax = opcode;
12234 }
12235 }
12236 else
12237 {
12238 constraint ((inst.relocs[0].type
12239 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
12240 && (inst.relocs[0].type
12241 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
12242 THUMB1_RELOC_ONLY);
12243
12244 inst.instruction = THUMB_OP32 (inst.instruction);
12245 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12246 inst.instruction |= Rn << r0off;
12247 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12248 }
12249 }
12250 else if (inst.operands[1].shifted && inst.operands[1].immisreg
12251 && (inst.instruction == T_MNEM_mov
12252 || inst.instruction == T_MNEM_movs))
12253 {
12254 /* Register shifts are encoded as separate shift instructions. */
12255 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
12256
12257 if (in_it_block ())
12258 narrow = !flags;
12259 else
12260 narrow = flags;
12261
12262 if (inst.size_req == 4)
12263 narrow = FALSE;
12264
12265 if (!low_regs || inst.operands[1].imm > 7)
12266 narrow = FALSE;
12267
12268 if (Rn != Rm)
12269 narrow = FALSE;
12270
12271 switch (inst.operands[1].shift_kind)
12272 {
12273 case SHIFT_LSL:
12274 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
12275 break;
12276 case SHIFT_ASR:
12277 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
12278 break;
12279 case SHIFT_LSR:
12280 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
12281 break;
12282 case SHIFT_ROR:
12283 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
12284 break;
12285 default:
12286 abort ();
12287 }
12288
12289 inst.instruction = opcode;
12290 if (narrow)
12291 {
12292 inst.instruction |= Rn;
12293 inst.instruction |= inst.operands[1].imm << 3;
12294 }
12295 else
12296 {
12297 if (flags)
12298 inst.instruction |= CONDS_BIT;
12299
12300 inst.instruction |= Rn << 8;
12301 inst.instruction |= Rm << 16;
12302 inst.instruction |= inst.operands[1].imm;
12303 }
12304 }
12305 else if (!narrow)
12306 {
12307 /* Some mov with immediate shift have narrow variants.
12308 Register shifts are handled above. */
12309 if (low_regs && inst.operands[1].shifted
12310 && (inst.instruction == T_MNEM_mov
12311 || inst.instruction == T_MNEM_movs))
12312 {
12313 if (in_it_block ())
12314 narrow = (inst.instruction == T_MNEM_mov);
12315 else
12316 narrow = (inst.instruction == T_MNEM_movs);
12317 }
12318
12319 if (narrow)
12320 {
12321 switch (inst.operands[1].shift_kind)
12322 {
12323 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12324 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12325 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12326 default: narrow = FALSE; break;
12327 }
12328 }
12329
12330 if (narrow)
12331 {
12332 inst.instruction |= Rn;
12333 inst.instruction |= Rm << 3;
12334 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
12335 }
12336 else
12337 {
12338 inst.instruction = THUMB_OP32 (inst.instruction);
12339 inst.instruction |= Rn << r0off;
12340 encode_thumb32_shifted_operand (1);
12341 }
12342 }
12343 else
12344 switch (inst.instruction)
12345 {
12346 case T_MNEM_mov:
12347 /* In v4t or v5t a move of two lowregs produces unpredictable
12348 results. Don't allow this. */
12349 if (low_regs)
12350 {
12351 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
12352 "MOV Rd, Rs with two low registers is not "
12353 "permitted on this architecture");
12354 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12355 arm_ext_v6);
12356 }
12357
12358 inst.instruction = T_OPCODE_MOV_HR;
12359 inst.instruction |= (Rn & 0x8) << 4;
12360 inst.instruction |= (Rn & 0x7);
12361 inst.instruction |= Rm << 3;
12362 break;
12363
12364 case T_MNEM_movs:
12365 /* We know we have low registers at this point.
12366 Generate LSLS Rd, Rs, #0. */
12367 inst.instruction = T_OPCODE_LSL_I;
12368 inst.instruction |= Rn;
12369 inst.instruction |= Rm << 3;
12370 break;
12371
12372 case T_MNEM_cmp:
12373 if (low_regs)
12374 {
12375 inst.instruction = T_OPCODE_CMP_LR;
12376 inst.instruction |= Rn;
12377 inst.instruction |= Rm << 3;
12378 }
12379 else
12380 {
12381 inst.instruction = T_OPCODE_CMP_HR;
12382 inst.instruction |= (Rn & 0x8) << 4;
12383 inst.instruction |= (Rn & 0x7);
12384 inst.instruction |= Rm << 3;
12385 }
12386 break;
12387 }
12388 return;
12389 }
12390
12391 inst.instruction = THUMB_OP16 (inst.instruction);
12392
12393 /* PR 10443: Do not silently ignore shifted operands. */
12394 constraint (inst.operands[1].shifted,
12395 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12396
12397 if (inst.operands[1].isreg)
12398 {
12399 if (Rn < 8 && Rm < 8)
12400 {
12401 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12402 since a MOV instruction produces unpredictable results. */
12403 if (inst.instruction == T_OPCODE_MOV_I8)
12404 inst.instruction = T_OPCODE_ADD_I3;
12405 else
12406 inst.instruction = T_OPCODE_CMP_LR;
12407
12408 inst.instruction |= Rn;
12409 inst.instruction |= Rm << 3;
12410 }
12411 else
12412 {
12413 if (inst.instruction == T_OPCODE_MOV_I8)
12414 inst.instruction = T_OPCODE_MOV_HR;
12415 else
12416 inst.instruction = T_OPCODE_CMP_HR;
12417 do_t_cpy ();
12418 }
12419 }
12420 else
12421 {
12422 constraint (Rn > 7,
12423 _("only lo regs allowed with immediate"));
12424 inst.instruction |= Rn << 8;
12425 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12426 }
12427 }
12428
/* Encode Thumb MOVW/MOVT (16-bit immediate move).  Bit 23 of the opcode
   template distinguishes MOVT ("top") from MOVW.  :lower16:/:upper16:
   relocations are converted to their Thumb-specific counterparts; a plain
   constant is scattered inline across the split immediate fields.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 set means this template is MOVT.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* No fixup needed: place the constant in the imm4:i:imm3:imm8
	 fields (bits 16-19, 26, 12-14 and 0-7 respectively).  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12461
/* Encode Thumb MVN/MVNS/TST/CMP/CMN (one destination-or-compared register
   plus a flexible second operand).  Chooses between the 16-bit and 32-bit
   encodings; MVN variants put the register in bits 8-11 (r0off == 8), the
   comparison forms in bits 16-19.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely disallow PC; the others also disallow SP.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Wide is forced by .w, an already-wide template, a shifted
	 operand, or high registers; otherwise the comparisons always
	 narrow and the flag-setting rule decides for MVN/MVNS.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the plain 16-bit two-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12541
/* Encode Thumb MRS.  The second operand is either a banked register
   (parsed into a register value whose bits are scattered into the
   instruction) or a PSR-mask immediate; on M-profile targets only the
   plain APSR/xPSR forms are accepted.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP/Neon system registers take a different path entirely.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: validate the parsed value, then place its
	 fields (including the SPSR bit) in the instruction.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
			"not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12589
/* Encode Thumb MSR.  The target special register arrives either as a
   parsed banked-register value (operand 0 isreg) or as a PSR-mask
   immediate; either way FLAGS carries the combined value, which is then
   scattered into the mask/register fields of the instruction.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension only the s and f masks are permitted;
	 without it only the f mask is.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
		 "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12636
/* Encode Thumb MUL/MULS.  MUL Rd, Rn [, Rm]: with only two operands the
   destination doubles as the second source.  The 16-bit encoding requires
   the destination to overlap one source and both sources to be low
   registers.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Wide if .w was given, the destination overlaps neither source,
	 or a source is a high register; otherwise follow the usual
	 IT-block flag-setting rule.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12699
12700 static void
12701 do_t_mull (void)
12702 {
12703 unsigned RdLo, RdHi, Rn, Rm;
12704
12705 RdLo = inst.operands[0].reg;
12706 RdHi = inst.operands[1].reg;
12707 Rn = inst.operands[2].reg;
12708 Rm = inst.operands[3].reg;
12709
12710 reject_bad_reg (RdLo);
12711 reject_bad_reg (RdHi);
12712 reject_bad_reg (Rn);
12713 reject_bad_reg (Rm);
12714
12715 inst.instruction |= RdLo << 12;
12716 inst.instruction |= RdHi << 8;
12717 inst.instruction |= Rn << 16;
12718 inst.instruction |= Rm;
12719
12720 if (RdLo == RdHi)
12721 as_tsktsk (_("rdhi and rdlo must be different"));
12722 }
12723
12724 static void
12725 do_t_nop (void)
12726 {
12727 set_it_insn_type (NEUTRAL_IT_INSN);
12728
12729 if (unified_syntax)
12730 {
12731 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12732 {
12733 inst.instruction = THUMB_OP32 (inst.instruction);
12734 inst.instruction |= inst.operands[0].imm;
12735 }
12736 else
12737 {
12738 /* PR9722: Check for Thumb2 availability before
12739 generating a thumb2 nop instruction. */
12740 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12741 {
12742 inst.instruction = THUMB_OP16 (inst.instruction);
12743 inst.instruction |= inst.operands[0].imm << 4;
12744 }
12745 else
12746 inst.instruction = 0x46c0;
12747 }
12748 }
12749 else
12750 {
12751 constraint (inst.operands[0].present,
12752 _("Thumb does not support NOP with hints"));
12753 inst.instruction = 0x46c0;
12754 }
12755 }
12756
12757 static void
12758 do_t_neg (void)
12759 {
12760 if (unified_syntax)
12761 {
12762 bfd_boolean narrow;
12763
12764 if (THUMB_SETS_FLAGS (inst.instruction))
12765 narrow = !in_it_block ();
12766 else
12767 narrow = in_it_block ();
12768 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12769 narrow = FALSE;
12770 if (inst.size_req == 4)
12771 narrow = FALSE;
12772
12773 if (!narrow)
12774 {
12775 inst.instruction = THUMB_OP32 (inst.instruction);
12776 inst.instruction |= inst.operands[0].reg << 8;
12777 inst.instruction |= inst.operands[1].reg << 16;
12778 }
12779 else
12780 {
12781 inst.instruction = THUMB_OP16 (inst.instruction);
12782 inst.instruction |= inst.operands[0].reg;
12783 inst.instruction |= inst.operands[1].reg << 3;
12784 }
12785 }
12786 else
12787 {
12788 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12789 BAD_HIREG);
12790 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12791
12792 inst.instruction = THUMB_OP16 (inst.instruction);
12793 inst.instruction |= inst.operands[0].reg;
12794 inst.instruction |= inst.operands[1].reg << 3;
12795 }
12796 }
12797
12798 static void
12799 do_t_orn (void)
12800 {
12801 unsigned Rd, Rn;
12802
12803 Rd = inst.operands[0].reg;
12804 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12805
12806 reject_bad_reg (Rd);
12807 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12808 reject_bad_reg (Rn);
12809
12810 inst.instruction |= Rd << 8;
12811 inst.instruction |= Rn << 16;
12812
12813 if (!inst.operands[2].isreg)
12814 {
12815 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12816 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12817 }
12818 else
12819 {
12820 unsigned Rm;
12821
12822 Rm = inst.operands[2].reg;
12823 reject_bad_reg (Rm);
12824
12825 constraint (inst.operands[2].shifted
12826 && inst.operands[2].immisreg,
12827 _("shift must be constant"));
12828 encode_thumb32_shifted_operand (2);
12829 }
12830 }
12831
12832 static void
12833 do_t_pkhbt (void)
12834 {
12835 unsigned Rd, Rn, Rm;
12836
12837 Rd = inst.operands[0].reg;
12838 Rn = inst.operands[1].reg;
12839 Rm = inst.operands[2].reg;
12840
12841 reject_bad_reg (Rd);
12842 reject_bad_reg (Rn);
12843 reject_bad_reg (Rm);
12844
12845 inst.instruction |= Rd << 8;
12846 inst.instruction |= Rn << 16;
12847 inst.instruction |= Rm;
12848 if (inst.operands[3].present)
12849 {
12850 unsigned int val = inst.relocs[0].exp.X_add_number;
12851 constraint (inst.relocs[0].exp.X_op != O_constant,
12852 _("expression too complex"));
12853 inst.instruction |= (val & 0x1c) << 10;
12854 inst.instruction |= (val & 0x03) << 6;
12855 }
12856 }
12857
12858 static void
12859 do_t_pkhtb (void)
12860 {
12861 if (!inst.operands[3].present)
12862 {
12863 unsigned Rtmp;
12864
12865 inst.instruction &= ~0x00000020;
12866
12867 /* PR 10168. Swap the Rm and Rn registers. */
12868 Rtmp = inst.operands[1].reg;
12869 inst.operands[1].reg = inst.operands[2].reg;
12870 inst.operands[2].reg = Rtmp;
12871 }
12872 do_t_pkhbt ();
12873 }
12874
/* Encode Thumb-2 PLD/PLI-style preload hints: validate a register
   offset if one was given, then defer to the common Thumb-2 address
   encoder.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12883
/* Encode Thumb PUSH/POP.  The register list arrives as a bit mask in
   operand 0.  Prefers the 16-bit encodings: low registers only, or low
   registers plus LR (push) / PC (pop); anything else needs the 32-bit
   multi-register form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit form
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12916
12917 static void
12918 do_t_clrm (void)
12919 {
12920 if (unified_syntax)
12921 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
12922 else
12923 {
12924 inst.error = _("invalid register list to push/pop instruction");
12925 return;
12926 }
12927 }
12928
12929 static void
12930 do_t_vscclrm (void)
12931 {
12932 if (inst.operands[0].issingle)
12933 {
12934 inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
12935 inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
12936 inst.instruction |= inst.operands[0].imm;
12937 }
12938 else
12939 {
12940 inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
12941 inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
12942 inst.instruction |= 1 << 8;
12943 inst.instruction |= inst.operands[0].imm << 1;
12944 }
12945 }
12946
12947 static void
12948 do_t_rbit (void)
12949 {
12950 unsigned Rd, Rm;
12951
12952 Rd = inst.operands[0].reg;
12953 Rm = inst.operands[1].reg;
12954
12955 reject_bad_reg (Rd);
12956 reject_bad_reg (Rm);
12957
12958 inst.instruction |= Rd << 8;
12959 inst.instruction |= Rm << 16;
12960 inst.instruction |= Rm;
12961 }
12962
12963 static void
12964 do_t_rev (void)
12965 {
12966 unsigned Rd, Rm;
12967
12968 Rd = inst.operands[0].reg;
12969 Rm = inst.operands[1].reg;
12970
12971 reject_bad_reg (Rd);
12972 reject_bad_reg (Rm);
12973
12974 if (Rd <= 7 && Rm <= 7
12975 && inst.size_req != 4)
12976 {
12977 inst.instruction = THUMB_OP16 (inst.instruction);
12978 inst.instruction |= Rd;
12979 inst.instruction |= Rm << 3;
12980 }
12981 else if (unified_syntax)
12982 {
12983 inst.instruction = THUMB_OP32 (inst.instruction);
12984 inst.instruction |= Rd << 8;
12985 inst.instruction |= Rm << 16;
12986 inst.instruction |= Rm;
12987 }
12988 else
12989 inst.error = BAD_HIREG;
12990 }
12991
12992 static void
12993 do_t_rrx (void)
12994 {
12995 unsigned Rd, Rm;
12996
12997 Rd = inst.operands[0].reg;
12998 Rm = inst.operands[1].reg;
12999
13000 reject_bad_reg (Rd);
13001 reject_bad_reg (Rm);
13002
13003 inst.instruction |= Rd << 8;
13004 inst.instruction |= Rm;
13005 }
13006
/* Encode Thumb RSB/RSBS.  With two operands the destination doubles as
   the first source.  "rsb Rd, Rs, #0" can shrink to the 16-bit NEG
   encoding when registers are low and the IT-block rules allow it.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 of the template marks the flag-setting form, which may
	 narrow outside an IT block; the other form only inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero qualifies.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13061
13062 static void
13063 do_t_setend (void)
13064 {
13065 if (warn_on_deprecated
13066 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13067 as_tsktsk (_("setend use is deprecated for ARMv8"));
13068
13069 set_it_insn_type (OUTSIDE_IT_INSN);
13070 if (inst.operands[0].imm)
13071 inst.instruction |= 0x8;
13072 }
13073
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR and their
   flag-setting forms), both register-shift and immediate-shift variants.
   With only one register operand the destination doubles as the source.
   Immediate-shift wide forms are emitted as MOV/MOVS with a shifted
   operand, since Thumb-2 has no separate wide shift-immediate opcode.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Start from the IT-block flag-setting rule, then rule out the
	 16-bit encoding case by case: high registers, ROR-by-immediate
	 (no 16-bit form), a register shift whose source and destination
	 differ, or an explicit .w suffix.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Wide immediate shift: rewrite as MOV/MOVS with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13221
13222 static void
13223 do_t_simd (void)
13224 {
13225 unsigned Rd, Rn, Rm;
13226
13227 Rd = inst.operands[0].reg;
13228 Rn = inst.operands[1].reg;
13229 Rm = inst.operands[2].reg;
13230
13231 reject_bad_reg (Rd);
13232 reject_bad_reg (Rn);
13233 reject_bad_reg (Rm);
13234
13235 inst.instruction |= Rd << 8;
13236 inst.instruction |= Rn << 16;
13237 inst.instruction |= Rm;
13238 }
13239
13240 static void
13241 do_t_simd2 (void)
13242 {
13243 unsigned Rd, Rn, Rm;
13244
13245 Rd = inst.operands[0].reg;
13246 Rm = inst.operands[1].reg;
13247 Rn = inst.operands[2].reg;
13248
13249 reject_bad_reg (Rd);
13250 reject_bad_reg (Rn);
13251 reject_bad_reg (Rm);
13252
13253 inst.instruction |= Rd << 8;
13254 inst.instruction |= Rn << 16;
13255 inst.instruction |= Rm;
13256 }
13257
13258 static void
13259 do_t_smc (void)
13260 {
13261 unsigned int value = inst.relocs[0].exp.X_add_number;
13262 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
13263 _("SMC is not permitted on this architecture"));
13264 constraint (inst.relocs[0].exp.X_op != O_constant,
13265 _("expression too complex"));
13266 inst.relocs[0].type = BFD_RELOC_UNUSED;
13267 inst.instruction |= (value & 0xf000) >> 12;
13268 inst.instruction |= (value & 0x0ff0);
13269 inst.instruction |= (value & 0x000f) << 16;
13270 /* PR gas/15623: SMC instructions must be last in an IT block. */
13271 set_it_insn_type_last ();
13272 }
13273
13274 static void
13275 do_t_hvc (void)
13276 {
13277 unsigned int value = inst.relocs[0].exp.X_add_number;
13278
13279 inst.relocs[0].type = BFD_RELOC_UNUSED;
13280 inst.instruction |= (value & 0x0fff);
13281 inst.instruction |= (value & 0xf000) << 4;
13282 }
13283
/* Common encoder for the Thumb-2 saturate instructions.  BIAS is
   subtracted from the written saturation position (operand 1) before
   encoding; do_t_ssat passes 1.  An optional LSL/ASR shift of the source
   is split across the imm3/imm2 fields.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* A zero shift amount is simply omitted from the encoding.  */
      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13321
/* Encode Thumb-2 SSAT: the saturation position is encoded biased by 1,
   hence the bias argument.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13327
13328 static void
13329 do_t_ssat16 (void)
13330 {
13331 unsigned Rd, Rn;
13332
13333 Rd = inst.operands[0].reg;
13334 Rn = inst.operands[2].reg;
13335
13336 reject_bad_reg (Rd);
13337 reject_bad_reg (Rn);
13338
13339 inst.instruction |= Rd << 8;
13340 inst.instruction |= inst.operands[1].imm - 1;
13341 inst.instruction |= Rn << 16;
13342 }
13343
13344 static void
13345 do_t_strex (void)
13346 {
13347 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13348 || inst.operands[2].postind || inst.operands[2].writeback
13349 || inst.operands[2].immisreg || inst.operands[2].shifted
13350 || inst.operands[2].negative,
13351 BAD_ADDR_MODE);
13352
13353 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13354
13355 inst.instruction |= inst.operands[0].reg << 8;
13356 inst.instruction |= inst.operands[1].reg << 12;
13357 inst.instruction |= inst.operands[2].reg << 16;
13358 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
13359 }
13360
13361 static void
13362 do_t_strexd (void)
13363 {
13364 if (!inst.operands[2].present)
13365 inst.operands[2].reg = inst.operands[1].reg + 1;
13366
13367 constraint (inst.operands[0].reg == inst.operands[1].reg
13368 || inst.operands[0].reg == inst.operands[2].reg
13369 || inst.operands[0].reg == inst.operands[3].reg,
13370 BAD_OVERLAP);
13371
13372 inst.instruction |= inst.operands[0].reg;
13373 inst.instruction |= inst.operands[1].reg << 12;
13374 inst.instruction |= inst.operands[2].reg << 8;
13375 inst.instruction |= inst.operands[3].reg << 16;
13376 }
13377
13378 static void
13379 do_t_sxtah (void)
13380 {
13381 unsigned Rd, Rn, Rm;
13382
13383 Rd = inst.operands[0].reg;
13384 Rn = inst.operands[1].reg;
13385 Rm = inst.operands[2].reg;
13386
13387 reject_bad_reg (Rd);
13388 reject_bad_reg (Rn);
13389 reject_bad_reg (Rm);
13390
13391 inst.instruction |= Rd << 8;
13392 inst.instruction |= Rn << 16;
13393 inst.instruction |= Rm;
13394 inst.instruction |= inst.operands[3].imm << 4;
13395 }
13396
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Neither register may be SP or PC.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* Prefer the 16-bit encoding when possible: no explicit 32-bit size
     request, both registers low, and no (or zero) rotation.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* Widen to the 32-bit encoding if we still hold a 16-bit opcode.  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation amount field.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-unified (divided) syntax only has the 16-bit low-register
	 form with no rotation.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13432
static void
do_t_swi (void)
{
  /* The SWI/SVC immediate is filled in by the fixup machinery.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13438
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the opcode distinguishes TBH (set) from TBB (clear).  */
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Before Armv8 the base register may not be SP.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH accepts a shifted (LSL #1) index; TBB does not.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13461
13462 static void
13463 do_t_udf (void)
13464 {
13465 if (!inst.operands[0].present)
13466 inst.operands[0].imm = 0;
13467
13468 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13469 {
13470 constraint (inst.size_req == 2,
13471 _("immediate value out of range"));
13472 inst.instruction = THUMB_OP32 (inst.instruction);
13473 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13474 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13475 }
13476 else
13477 {
13478 inst.instruction = THUMB_OP16 (inst.instruction);
13479 inst.instruction |= inst.operands[0].imm;
13480 }
13481
13482 set_it_insn_type (NEUTRAL_IT_INSN);
13483 }
13484
13485
static void
do_t_usat (void)
{
  /* Shares its implementation with SSAT; the argument selects the
     variant (0 here for USAT — see do_t_ssat_usat for its meaning).  */
  do_t_ssat_usat (0);
}
13491
13492 static void
13493 do_t_usat16 (void)
13494 {
13495 unsigned Rd, Rn;
13496
13497 Rd = inst.operands[0].reg;
13498 Rn = inst.operands[2].reg;
13499
13500 reject_bad_reg (Rd);
13501 reject_bad_reg (Rn);
13502
13503 inst.instruction |= Rd << 8;
13504 inst.instruction |= inst.operands[1].imm;
13505 inst.instruction |= Rn << 16;
13506 }
13507
13508 /* Checking the range of the branch offset (VAL) with NBITS bits
13509 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13510 static int
13511 v8_1_branch_value_check (int val, int nbits, int is_signed)
13512 {
13513 gas_assert (nbits > 0 && nbits <= 32);
13514 if (is_signed)
13515 {
13516 int cmp = (1 << (nbits - 1));
13517 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13518 return FAIL;
13519 }
13520 else
13521 {
13522 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13523 return FAIL;
13524 }
13525 return SUCCESS;
13526 }
13527
/* For branches in Armv8.1-M Mainline.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      /* Operand 0 is the branch-future point: a 5-bit positive, even
	 offset placed in bits 26:23 of the encoding.  */
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Not resolvable now; defer to a relocation.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Split the 17-bit branch offset into the immA/immB/immC
	     fields of the encoding.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* As for BF but with a 19-bit offset (wider immA field).  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  /* 13-bit offset split the same way as for BF/BFL.  */
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  Both operand 0 and operand 2 must be resolved the
	 same way (both constants or both relocations).  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  /* The else-destination must be 2 or 4 bytes past the
	     branch-future point; the T bit records which.  */
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition field; the instruction itself must be
	 unconditional.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: branch target held in a register.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13636
/* Helper function for do_t_loloop to handle relocations.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      /* Offset already known: range-check and encode it directly.  LE
	 branches backwards, so its offset is negated first.  */
      int value = inst.relocs[0].exp.X_add_number;
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the offset: bits 11:2 into immh, bit 1 into imml; bit 0
	 must be zero (checked above).  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      /* Offset not known yet; leave it to a relocation.  */
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
13662
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>: with no loop-count register present, set bit 21
	 (the LETP/"F" form without a register operand).  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      /* WLS additionally encodes a forward branch offset.  */
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      /* DLS/WLS take the iteration count in a register.  */
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13694
13695 /* Neon instruction encoder helpers. */
13696
13697 /* Encodings for the different types for various Neon opcodes. */
13698
13699 /* An "invalid" code for the following tables. */
13700 #define N_INV -1u
13701
/* One row of NEON_ENC_TAB: the three alternative base encodings for an
   overloaded Neon mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or "register") variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13708
13709 /* Map overloaded Neon opcodes to their respective encodings. */
13710 #define NEON_ENC_TAB \
13711 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13712 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13713 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13714 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13715 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13716 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13717 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13718 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13719 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13720 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13721 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13722 /* Register variants of the following two instructions are encoded as
13723 vcge / vcgt with the operands reversed. */ \
13724 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13725 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13726 X(vfma, N_INV, 0x0000c10, N_INV), \
13727 X(vfms, N_INV, 0x0200c10, N_INV), \
13728 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13729 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13730 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13731 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13732 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13733 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13734 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13735 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13736 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13737 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13738 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13739 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13740 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13741 X(vshl, 0x0000400, N_INV, 0x0800510), \
13742 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13743 X(vand, 0x0000110, N_INV, 0x0800030), \
13744 X(vbic, 0x0100110, N_INV, 0x0800030), \
13745 X(veor, 0x1000110, N_INV, N_INV), \
13746 X(vorn, 0x0300110, N_INV, 0x0800010), \
13747 X(vorr, 0x0200110, N_INV, 0x0800010), \
13748 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13749 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13750 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13751 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13752 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13753 X(vst1, 0x0000000, 0x0800000, N_INV), \
13754 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13755 X(vst2, 0x0000100, 0x0800100, N_INV), \
13756 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13757 X(vst3, 0x0000200, 0x0800200, N_INV), \
13758 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13759 X(vst4, 0x0000300, 0x0800300, N_INV), \
13760 X(vmovn, 0x1b20200, N_INV, N_INV), \
13761 X(vtrn, 0x1b20080, N_INV, N_INV), \
13762 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13763 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13764 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13765 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13766 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13767 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13768 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13769 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13770 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13771 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13772 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13773 X(vseleq, 0xe000a00, N_INV, N_INV), \
13774 X(vselvs, 0xe100a00, N_INV, N_INV), \
13775 X(vselge, 0xe200a00, N_INV, N_INV), \
13776 X(vselgt, 0xe300a00, N_INV, N_INV), \
13777 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13778 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13779 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13780 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13781 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13782 X(aes, 0x3b00300, N_INV, N_INV), \
13783 X(sha3op, 0x2000c00, N_INV, N_INV), \
13784 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13785 X(sha2op, 0x3ba0380, N_INV, N_INV)
13786
13787 enum neon_opc
13788 {
13789 #define X(OPC,I,F,S) N_MNEM_##OPC
13790 NEON_ENC_TAB
13791 #undef X
13792 };
13793
13794 static const struct neon_tab_entry neon_enc_tab[] =
13795 {
13796 #define X(OPC,I,F,S) { (I), (F), (S) }
13797 NEON_ENC_TAB
13798 #undef X
13799 };
13800
13801 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13802 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13803 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13804 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13805 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13806 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13807 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13808 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13809 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13810 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13811 #define NEON_ENC_SINGLE_(X) \
13812 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13813 #define NEON_ENC_DOUBLE_(X) \
13814 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13815 #define NEON_ENC_FPV8_(X) \
13816 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13817
13818 #define NEON_ENCODE(type, inst) \
13819 do \
13820 { \
13821 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13822 inst.is_neon = 1; \
13823 } \
13824 while (0)
13825
13826 #define check_neon_suffixes \
13827 do \
13828 { \
13829 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13830 { \
13831 as_bad (_("invalid neon suffix for non neon instruction")); \
13832 return; \
13833 } \
13834 } \
13835 while (0)
13836
13837 /* Define shapes for instruction operands. The following mnemonic characters
13838 are used in this table:
13839
13840 F - VFP S<n> register
13841 D - Neon D<n> register
13842 Q - Neon Q<n> register
13843 I - Immediate
13844 S - Scalar
13845 R - ARM register
13846 L - D<n> register list
13847
13848 This table is used to generate various data:
13849 - enumerations of the form NS_DDR to be used as arguments to
13850 neon_select_shape.
13851 - a table classifying shapes into single, double, quad, mixed.
13852 - a table used to drive neon_select_shape. */
13853
13854 #define NEON_SHAPE_DEF \
13855 X(3, (D, D, D), DOUBLE), \
13856 X(3, (Q, Q, Q), QUAD), \
13857 X(3, (D, D, I), DOUBLE), \
13858 X(3, (Q, Q, I), QUAD), \
13859 X(3, (D, D, S), DOUBLE), \
13860 X(3, (Q, Q, S), QUAD), \
13861 X(2, (D, D), DOUBLE), \
13862 X(2, (Q, Q), QUAD), \
13863 X(2, (D, S), DOUBLE), \
13864 X(2, (Q, S), QUAD), \
13865 X(2, (D, R), DOUBLE), \
13866 X(2, (Q, R), QUAD), \
13867 X(2, (D, I), DOUBLE), \
13868 X(2, (Q, I), QUAD), \
13869 X(3, (D, L, D), DOUBLE), \
13870 X(2, (D, Q), MIXED), \
13871 X(2, (Q, D), MIXED), \
13872 X(3, (D, Q, I), MIXED), \
13873 X(3, (Q, D, I), MIXED), \
13874 X(3, (Q, D, D), MIXED), \
13875 X(3, (D, Q, Q), MIXED), \
13876 X(3, (Q, Q, D), MIXED), \
13877 X(3, (Q, D, S), MIXED), \
13878 X(3, (D, Q, S), MIXED), \
13879 X(4, (D, D, D, I), DOUBLE), \
13880 X(4, (Q, Q, Q, I), QUAD), \
13881 X(4, (D, D, S, I), DOUBLE), \
13882 X(4, (Q, Q, S, I), QUAD), \
13883 X(2, (F, F), SINGLE), \
13884 X(3, (F, F, F), SINGLE), \
13885 X(2, (F, I), SINGLE), \
13886 X(2, (F, D), MIXED), \
13887 X(2, (D, F), MIXED), \
13888 X(3, (F, F, I), MIXED), \
13889 X(4, (R, R, F, F), SINGLE), \
13890 X(4, (F, F, R, R), SINGLE), \
13891 X(3, (D, R, R), DOUBLE), \
13892 X(3, (R, R, D), DOUBLE), \
13893 X(2, (S, R), SINGLE), \
13894 X(2, (R, S), SINGLE), \
13895 X(2, (F, R), SINGLE), \
13896 X(2, (R, F), SINGLE), \
13897 /* Half float shape supported so far. */\
13898 X (2, (H, D), MIXED), \
13899 X (2, (D, H), MIXED), \
13900 X (2, (H, F), MIXED), \
13901 X (2, (F, H), MIXED), \
13902 X (2, (H, H), HALF), \
13903 X (2, (H, R), HALF), \
13904 X (2, (R, H), HALF), \
13905 X (2, (H, I), HALF), \
13906 X (3, (H, H, H), HALF), \
13907 X (3, (H, F, I), MIXED), \
13908 X (3, (F, H, I), MIXED), \
13909 X (3, (D, H, H), MIXED), \
13910 X (3, (D, H, S), MIXED)
13911
13912 #define S2(A,B) NS_##A##B
13913 #define S3(A,B,C) NS_##A##B##C
13914 #define S4(A,B,C,D) NS_##A##B##C##D
13915
13916 #define X(N, L, C) S##N L
13917
13918 enum neon_shape
13919 {
13920 NEON_SHAPE_DEF,
13921 NS_NULL
13922 };
13923
13924 #undef X
13925 #undef S2
13926 #undef S3
13927 #undef S4
13928
13929 enum neon_shape_class
13930 {
13931 SC_HALF,
13932 SC_SINGLE,
13933 SC_DOUBLE,
13934 SC_QUAD,
13935 SC_MIXED
13936 };
13937
13938 #define X(N, L, C) SC_##C
13939
13940 static enum neon_shape_class neon_shape_class[] =
13941 {
13942 NEON_SHAPE_DEF
13943 };
13944
13945 #undef X
13946
13947 enum neon_shape_el
13948 {
13949 SE_H,
13950 SE_F,
13951 SE_D,
13952 SE_Q,
13953 SE_I,
13954 SE_S,
13955 SE_R,
13956 SE_L
13957 };
13958
/* Register widths of above, indexed by enum neon_shape_el.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H */
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I: immediate, no register width.  */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L: register list, no single width.  */
};
13971
13972 struct neon_shape_info
13973 {
13974 unsigned els;
13975 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13976 };
13977
13978 #define S2(A,B) { SE_##A, SE_##B }
13979 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13980 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13981
13982 #define X(N, L, C) { N, S##N L }
13983
13984 static struct neon_shape_info neon_shape_tab[] =
13985 {
13986 NEON_SHAPE_DEF
13987 };
13988
13989 #undef X
13990 #undef S2
13991 #undef S3
13992 #undef S4
13993
13994 /* Bit masks used in type checking given instructions.
13995 'N_EQK' means the type must be the same as (or based on in some way) the key
13996 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13997 set, various other bits can be set as well in order to modify the meaning of
13998 the type constraint. */
13999
14000 enum neon_type_mask
14001 {
14002 N_S8 = 0x0000001,
14003 N_S16 = 0x0000002,
14004 N_S32 = 0x0000004,
14005 N_S64 = 0x0000008,
14006 N_U8 = 0x0000010,
14007 N_U16 = 0x0000020,
14008 N_U32 = 0x0000040,
14009 N_U64 = 0x0000080,
14010 N_I8 = 0x0000100,
14011 N_I16 = 0x0000200,
14012 N_I32 = 0x0000400,
14013 N_I64 = 0x0000800,
14014 N_8 = 0x0001000,
14015 N_16 = 0x0002000,
14016 N_32 = 0x0004000,
14017 N_64 = 0x0008000,
14018 N_P8 = 0x0010000,
14019 N_P16 = 0x0020000,
14020 N_F16 = 0x0040000,
14021 N_F32 = 0x0080000,
14022 N_F64 = 0x0100000,
14023 N_P64 = 0x0200000,
14024 N_KEY = 0x1000000, /* Key element (main type specifier). */
14025 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
14026 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
14027 N_UNT = 0x8000000, /* Must be explicitly untyped. */
14028 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
14029 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
14030 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14031 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14032 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14033 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
14034 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14035 N_UTYP = 0,
14036 N_MAX_NONSPECIAL = N_P64
14037 };
14038
14039 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14040
14041 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14042 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14043 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14044 #define N_S_32 (N_S8 | N_S16 | N_S32)
14045 #define N_F_16_32 (N_F16 | N_F32)
14046 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14047 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14048 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14049 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14050
14051 /* Pass this as the first type argument to neon_check_type to ignore types
14052 altogether. */
14053 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14054
14055 /* Select a "shape" for the current instruction (describing register types or
14056 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14057 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14058 function of operand parsing, so this function doesn't need to be called.
14059 Shapes should be listed in order of decreasing length. */
14060
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the first whose element kinds all
     match the parsed operands wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a .f16, .16, .u16, .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32 s2, s2, #29:	NS_HFI
		 vcvt.f16.s32 s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* A single-precision register; a missing type specifier
		 defaults to 32 bits here, unlike SE_H above.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* A plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* An immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked further here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14197
14198 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14199 means the Q bit should be set). */
14200
static int
neon_quad (enum neon_shape shape)
{
  /* A shape is "quad" exactly when its class table entry says so.  */
  return neon_shape_class[shape] == SC_QUAD;
}
14206
14207 static void
14208 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14209 unsigned *g_size)
14210 {
14211 /* Allow modification to be made to types which are constrained to be
14212 based on the key element, based on bits set alongside N_EQK. */
14213 if ((typebits & N_EQK) != 0)
14214 {
14215 if ((typebits & N_HLF) != 0)
14216 *g_size /= 2;
14217 else if ((typebits & N_DBL) != 0)
14218 *g_size *= 2;
14219 if ((typebits & N_SGN) != 0)
14220 *g_type = NT_signed;
14221 else if ((typebits & N_UNS) != 0)
14222 *g_type = NT_unsigned;
14223 else if ((typebits & N_INT) != 0)
14224 *g_type = NT_integer;
14225 else if ((typebits & N_FLT) != 0)
14226 *g_type = NT_float;
14227 else if ((typebits & N_SIZ) != 0)
14228 *g_type = NT_untyped;
14229 }
14230 }
14231
14232 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14233 operand type, i.e. the single type specified in a Neon instruction when it
14234 is the only one given. */
14235
static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  /* Start from the key type and apply THISARG's modifier bits to it.  */
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for N_EQK-constrained operands.  */
  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
14247
14248 /* Convert Neon type and size into compact bitmask representation. */
14249
static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  /* Pure lookup: map each (type, size) pair to its single-bit mask.  */
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  /* Unrecognized type/size combination.  */
  return N_UTYP;
}
14324
14325 /* Convert compact Neon bitmask type representation to a type and size. Only
14326 handles the case where a single bit is set in the mask. */
14327
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* The N_EQK modifier bits alias the low type bits, so a mask carrying
     N_EQK cannot be decoded as a concrete type.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size-specific bit is set.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element type family.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
14363
14364 /* Modify a bitmask of allowed types. This is only needed for type
14365 relaxation. */
14366
14367 static unsigned
14368 modify_types_allowed (unsigned allowed, unsigned mods)
14369 {
14370 unsigned size;
14371 enum neon_el_type type;
14372 unsigned destmask;
14373 int i;
14374
14375 destmask = 0;
14376
14377 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14378 {
14379 if (el_type_of_type_chk (&type, &size,
14380 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14381 {
14382 neon_modify_type_size (mods, &type, &size);
14383 destmask |= type_chk_of_el_type (type, size);
14384 }
14385 }
14386
14387 return destmask;
14388 }
14389
14390 /* Check type and return type classification.
14391 The manual states (paraphrase): If one datatype is given, it indicates the
14392 type given in:
14393 - the second operand, if there is one
14394 - the operand, if there is no second operand
14395 - the result, if there are no operands.
14396 This isn't quite good enough though, so we use a concept of a "key" datatype
14397 which is set on a per-instruction basis, which is the one which matters when
14398 only one data type is written.
14399 Note: this function has side-effects (e.g. filling in missing operands). All
14400 Neon instructions should call it before performing bit encoding. */
14401
/* Resolve and validate the element types of a Neon/VFP instruction with ELS
   operands of shape NS.  The varargs supply one N_* bitmask per operand
   describing the types that operand accepts: N_KEY marks the "key" operand
   whose resolved type drives the others, N_EQK marks operands whose type is
   derived from the key's, and N_VFP requests a VFP register-width check
   against the shape.  Returns the resolved type of the key operand, or
   {NT_invtype, -1} (after reporting an error) on failure.  */
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means no type checking is wanted at all.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type suffix on the mnemonic and per-operand types are exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 resolves the key operand's type and size; pass 1 checks every
     operand against the (possibly modified) key type.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable. */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly. */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension. */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand. */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register. */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14599
14600 /* Neon-style VFP instruction forwarding. */
14601
14602 /* Thumb VFP instructions have 0xE in the condition field. */
14603
14604 static void
14605 do_vfp_cond_or_thumb (void)
14606 {
14607 inst.is_neon = 1;
14608
14609 if (thumb_mode)
14610 inst.instruction |= 0xe0000000;
14611 else
14612 inst.instruction |= inst.cond << 28;
14613 }
14614
14615 /* Look up and encode a simple mnemonic, for use as a helper function for the
14616 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14617 etc. It is assumed that operand parsing has already been done, and that the
14618 operands are in the form expected by the given opcode (this isn't necessarily
14619 the same as the form in which they were parsed, hence some massaging must
14620 take place before this function is called).
14621 Checks current arch version against that in the looked-up opcode. */
14622
14623 static void
14624 do_vfp_nsyn_opcode (const char *opname)
14625 {
14626 const struct asm_opcode *opcode;
14627
14628 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14629
14630 if (!opcode)
14631 abort ();
14632
14633 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14634 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14635 _(BAD_FPU));
14636
14637 inst.is_neon = 1;
14638
14639 if (thumb_mode)
14640 {
14641 inst.instruction = opcode->tvalue;
14642 opcode->tencode ();
14643 }
14644 else
14645 {
14646 inst.instruction = (inst.cond << 28) | opcode->avalue;
14647 opcode->aencode ();
14648 }
14649 }
14650
14651 static void
14652 do_vfp_nsyn_add_sub (enum neon_shape rs)
14653 {
14654 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14655
14656 if (rs == NS_FFF || rs == NS_HHH)
14657 {
14658 if (is_add)
14659 do_vfp_nsyn_opcode ("fadds");
14660 else
14661 do_vfp_nsyn_opcode ("fsubs");
14662
14663 /* ARMv8.2 fp16 instruction. */
14664 if (rs == NS_HHH)
14665 do_scalar_fp16_v82_encode ();
14666 }
14667 else
14668 {
14669 if (is_add)
14670 do_vfp_nsyn_opcode ("faddd");
14671 else
14672 do_vfp_nsyn_opcode ("fsubd");
14673 }
14674 }
14675
14676 /* Check operand types to see if this is a VFP instruction, and if so call
14677 PFN (). */
14678
14679 static int
14680 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14681 {
14682 enum neon_shape rs;
14683 struct neon_type_el et;
14684
14685 switch (args)
14686 {
14687 case 2:
14688 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14689 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14690 break;
14691
14692 case 3:
14693 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14694 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14695 N_F_ALL | N_KEY | N_VFP);
14696 break;
14697
14698 default:
14699 abort ();
14700 }
14701
14702 if (et.type != NT_invtype)
14703 {
14704 pfn (rs);
14705 return SUCCESS;
14706 }
14707
14708 inst.error = NULL;
14709 return FAIL;
14710 }
14711
14712 static void
14713 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14714 {
14715 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14716
14717 if (rs == NS_FFF || rs == NS_HHH)
14718 {
14719 if (is_mla)
14720 do_vfp_nsyn_opcode ("fmacs");
14721 else
14722 do_vfp_nsyn_opcode ("fnmacs");
14723
14724 /* ARMv8.2 fp16 instruction. */
14725 if (rs == NS_HHH)
14726 do_scalar_fp16_v82_encode ();
14727 }
14728 else
14729 {
14730 if (is_mla)
14731 do_vfp_nsyn_opcode ("fmacd");
14732 else
14733 do_vfp_nsyn_opcode ("fnmacd");
14734 }
14735 }
14736
14737 static void
14738 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14739 {
14740 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14741
14742 if (rs == NS_FFF || rs == NS_HHH)
14743 {
14744 if (is_fma)
14745 do_vfp_nsyn_opcode ("ffmas");
14746 else
14747 do_vfp_nsyn_opcode ("ffnmas");
14748
14749 /* ARMv8.2 fp16 instruction. */
14750 if (rs == NS_HHH)
14751 do_scalar_fp16_v82_encode ();
14752 }
14753 else
14754 {
14755 if (is_fma)
14756 do_vfp_nsyn_opcode ("ffmad");
14757 else
14758 do_vfp_nsyn_opcode ("ffnmad");
14759 }
14760 }
14761
14762 static void
14763 do_vfp_nsyn_mul (enum neon_shape rs)
14764 {
14765 if (rs == NS_FFF || rs == NS_HHH)
14766 {
14767 do_vfp_nsyn_opcode ("fmuls");
14768
14769 /* ARMv8.2 fp16 instruction. */
14770 if (rs == NS_HHH)
14771 do_scalar_fp16_v82_encode ();
14772 }
14773 else
14774 do_vfp_nsyn_opcode ("fmuld");
14775 }
14776
14777 static void
14778 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14779 {
14780 int is_neg = (inst.instruction & 0x80) != 0;
14781 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14782
14783 if (rs == NS_FF || rs == NS_HH)
14784 {
14785 if (is_neg)
14786 do_vfp_nsyn_opcode ("fnegs");
14787 else
14788 do_vfp_nsyn_opcode ("fabss");
14789
14790 /* ARMv8.2 fp16 instruction. */
14791 if (rs == NS_HH)
14792 do_scalar_fp16_v82_encode ();
14793 }
14794 else
14795 {
14796 if (is_neg)
14797 do_vfp_nsyn_opcode ("fnegd");
14798 else
14799 do_vfp_nsyn_opcode ("fabsd");
14800 }
14801 }
14802
14803 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14804 insns belong to Neon, and are handled elsewhere. */
14805
14806 static void
14807 do_vfp_nsyn_ldm_stm (int is_dbmode)
14808 {
14809 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14810 if (is_ldm)
14811 {
14812 if (is_dbmode)
14813 do_vfp_nsyn_opcode ("fldmdbs");
14814 else
14815 do_vfp_nsyn_opcode ("fldmias");
14816 }
14817 else
14818 {
14819 if (is_dbmode)
14820 do_vfp_nsyn_opcode ("fstmdbs");
14821 else
14822 do_vfp_nsyn_opcode ("fstmias");
14823 }
14824 }
14825
14826 static void
14827 do_vfp_nsyn_sqrt (void)
14828 {
14829 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14830 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14831
14832 if (rs == NS_FF || rs == NS_HH)
14833 {
14834 do_vfp_nsyn_opcode ("fsqrts");
14835
14836 /* ARMv8.2 fp16 instruction. */
14837 if (rs == NS_HH)
14838 do_scalar_fp16_v82_encode ();
14839 }
14840 else
14841 do_vfp_nsyn_opcode ("fsqrtd");
14842 }
14843
14844 static void
14845 do_vfp_nsyn_div (void)
14846 {
14847 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14848 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14849 N_F_ALL | N_KEY | N_VFP);
14850
14851 if (rs == NS_FFF || rs == NS_HHH)
14852 {
14853 do_vfp_nsyn_opcode ("fdivs");
14854
14855 /* ARMv8.2 fp16 instruction. */
14856 if (rs == NS_HHH)
14857 do_scalar_fp16_v82_encode ();
14858 }
14859 else
14860 do_vfp_nsyn_opcode ("fdivd");
14861 }
14862
14863 static void
14864 do_vfp_nsyn_nmul (void)
14865 {
14866 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14867 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14868 N_F_ALL | N_KEY | N_VFP);
14869
14870 if (rs == NS_FFF || rs == NS_HHH)
14871 {
14872 NEON_ENCODE (SINGLE, inst);
14873 do_vfp_sp_dyadic ();
14874
14875 /* ARMv8.2 fp16 instruction. */
14876 if (rs == NS_HHH)
14877 do_scalar_fp16_v82_encode ();
14878 }
14879 else
14880 {
14881 NEON_ENCODE (DOUBLE, inst);
14882 do_vfp_dp_rd_rn_rm ();
14883 }
14884 do_vfp_cond_or_thumb ();
14885
14886 }
14887
/* Encode VCMP/VCMPE in VFP syntax.  A register second operand gives the
   register-register compare; a non-register second operand selects the
   compare-with-zero form, and the internal mnemonic is shifted to its
   vcmpz/vcmpez counterpart before encoding.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  /* FP16 uses the single-precision register file.  */
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      /* Rewrite the mnemonic to the compare-against-zero variant.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction. */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14942
14943 static void
14944 nsyn_insert_sp (void)
14945 {
14946 inst.operands[1] = inst.operands[0];
14947 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14948 inst.operands[0].reg = REG_SP;
14949 inst.operands[0].isreg = 1;
14950 inst.operands[0].writeback = 1;
14951 inst.operands[0].present = 1;
14952 }
14953
14954 static void
14955 do_vfp_nsyn_push (void)
14956 {
14957 nsyn_insert_sp ();
14958
14959 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14960 _("register list must contain at least 1 and at most 16 "
14961 "registers"));
14962
14963 if (inst.operands[1].issingle)
14964 do_vfp_nsyn_opcode ("fstmdbs");
14965 else
14966 do_vfp_nsyn_opcode ("fstmdbd");
14967 }
14968
14969 static void
14970 do_vfp_nsyn_pop (void)
14971 {
14972 nsyn_insert_sp ();
14973
14974 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14975 _("register list must contain at least 1 and at most 16 "
14976 "registers"));
14977
14978 if (inst.operands[1].issingle)
14979 do_vfp_nsyn_opcode ("fldmias");
14980 else
14981 do_vfp_nsyn_opcode ("fldmiad");
14982 }
14983
14984 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14985 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14986
14987 static void
14988 neon_dp_fixup (struct arm_it* insn)
14989 {
14990 unsigned int i = insn->instruction;
14991 insn->is_neon = 1;
14992
14993 if (thumb_mode)
14994 {
14995 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14996 if (i & (1 << 24))
14997 i |= 1 << 28;
14998
14999 i &= ~(1 << 24);
15000
15001 i |= 0xef000000;
15002 }
15003 else
15004 i |= 0xf2000000;
15005
15006 insn->instruction = i;
15007 }
15008
15009 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15010 (0, 1, 2, 3). */
15011
static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the least significant set bit, so the
     power-of-two sizes 8/16/32/64 map to 0/1/2/3.  Callers must pass a
     valid element size; X == 0 would wrap around to a huge value.  */
  return ffs (x) - 4;
}
15017
/* Split a 5-bit Neon register number into its encoding fields: LOW4 gives
   the four low bits (Vd/Vn/Vm) and HI1 the fifth bit (D/N/M).  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15020
15021 /* Encode insns with bit pattern:
15022
15023 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15024 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15025
15026 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15027 different meaning for some instruction. */
15028
15029 static void
15030 neon_three_same (int isquad, int ubit, int size)
15031 {
15032 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15033 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15034 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15035 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15036 inst.instruction |= LOW4 (inst.operands[2].reg);
15037 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15038 inst.instruction |= (isquad != 0) << 6;
15039 inst.instruction |= (ubit != 0) << 24;
15040 if (size != -1)
15041 inst.instruction |= neon_logbits (size) << 20;
15042
15043 neon_dp_fixup (&inst);
15044 }
15045
15046 /* Encode instructions of the form:
15047
15048 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15049 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15050
15051 Don't write size if SIZE == -1. */
15052
15053 static void
15054 neon_two_same (int qbit, int ubit, int size)
15055 {
15056 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15057 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15058 inst.instruction |= LOW4 (inst.operands[1].reg);
15059 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15060 inst.instruction |= (qbit != 0) << 6;
15061 inst.instruction |= (ubit != 0) << 24;
15062
15063 if (size != -1)
15064 inst.instruction |= neon_logbits (size) << 18;
15065
15066 neon_dp_fixup (&inst);
15067 }
15068
15069 /* Neon instruction encoders, in approximate order of appearance. */
15070
15071 static void
15072 do_neon_dyadic_i_su (void)
15073 {
15074 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15075 struct neon_type_el et = neon_check_type (3, rs,
15076 N_EQK, N_EQK, N_SU_32 | N_KEY);
15077 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15078 }
15079
15080 static void
15081 do_neon_dyadic_i64_su (void)
15082 {
15083 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15084 struct neon_type_el et = neon_check_type (3, rs,
15085 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15086 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15087 }
15088
/* Encode an immediate-shift instruction.  ET supplies the element type,
   IMMBITS the already-biased immediate field.  The element size is encoded
   via SIZE (in bytes): bit 7 is set only for 64-bit elements, and the
   remaining size bits land at bits 19-21.  The U bit is written only when
   WRITE_UBIT is set (from UVAL).  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;   /* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* size >> 3 is nonzero only for 8-byte (64-bit) elements.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
15107
/* Encode VSHL.  With an immediate third operand this is the immediate-shift
   form; with a register it is a three-register form whose source operands
   are encoded in swapped order relative to other three-same instructions.  */
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15143
/* Encode VQSHL (saturating shift left); mirrors do_neon_shl_imm, but the
   immediate form accepts signed/unsigned types and always writes the U
   bit.  */
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
15173
15174 static void
15175 do_neon_rshl (void)
15176 {
15177 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15178 struct neon_type_el et = neon_check_type (3, rs,
15179 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15180 unsigned int tmp;
15181
15182 tmp = inst.operands[2].reg;
15183 inst.operands[2].reg = inst.operands[1].reg;
15184 inst.operands[1].reg = tmp;
15185 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15186 }
15187
/* Compute the cmode encoding for a bitwise-logic immediate IMMEDIATE of
   element width SIZE bits, storing the encoded 8-bit immediate through
   IMMBITS.  Only values expressible as one byte shifted within a 16- or
   32-bit element are representable; returns the cmode value, or FAIL
   (after reporting an error) otherwise.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Try each byte position of a 32-bit element in turn.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to the 16-bit encodings only if the value is a repeating
	 16-bit pattern.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

 bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
15243
/* Encode the Neon bitwise-logic instructions.  With a register third
   operand this is a plain three-same encoding whose U bit and size were
   fixed by the opcode table.  With an immediate operand, VAND/VORN are
   pseudo-instructions realized by inverting the immediate and encoding
   VBIC/VORR respectively, and the value is squeezed into the cmode plus
   8-bit immediate fields.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      /* Bitwise register forms accept any element type.  */
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask. */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern. */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant. */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC. */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR. */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15329
15330 static void
15331 do_neon_bitfield (void)
15332 {
15333 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15334 neon_check_type (3, rs, N_IGNORE_TYPE);
15335 neon_three_same (neon_quad (rs), 0, -1);
15336 }
15337
/* Encode a three-register operation accepting the element types in TYPES
   (with DESTBITS modifiers applied to the destination operand).  The U bit
   is set when the resolved type equals UBIT_MEANING; float types use the
   FLOAT encoding variant.  */
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only 16-bit float writes an explicit size; -1 leaves the size
	 field as encoded in the opcode table.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
15356
/* Three-register operation over signed/unsigned integer and float 16/32
   types; the U bit tracks unsignedness.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15362
/* As do_neon_dyadic_if_su, for mnemonics restricted to D registers.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15370
/* Three-register operation over integer and float 32-bit types, with the
   U bit never set.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15378
/* Bitmask of checks for vfp_or_neon_is_neon to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Validate / fix up the condition field.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
15385
15386 /* Call this function if an instruction which may have belonged to the VFP or
15387 Neon instruction sets, but turned out to be a Neon instruction (due to the
15388 operand types involved, etc.). We have to check and/or fix-up a couple of
15389 things:
15390
15391 - Make sure the user hasn't attempted to make a Neon instruction
15392 conditional.
15393 - Alter the value in the condition code field if necessary.
15394 - Make sure that the arch supports Neon instructions.
15395
15396 Which of these operations take place depends on bits from enum
15397 vfp_or_neon_is_neon_bits.
15398
15399 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15400 current instruction's condition is COND_ALWAYS, the condition field is
15401 changed to inst.uncond_value. This is necessary because instructions shared
15402 between VFP and Neon may be conditional for the VFP variants only, and the
15403 unconditional Neon version must have, e.g., 0xF in the condition field. */
15404
15405 static int
15406 vfp_or_neon_is_neon (unsigned check)
15407 {
15408 /* Conditions are always legal in Thumb mode (IT blocks). */
15409 if (!thumb_mode && (check & NEON_CHECK_CC))
15410 {
15411 if (inst.cond != COND_ALWAYS)
15412 {
15413 first_error (_(BAD_COND));
15414 return FAIL;
15415 }
15416 if (inst.uncond_value != -1)
15417 inst.instruction |= inst.uncond_value << 28;
15418 }
15419
15420 if ((check & NEON_CHECK_ARCH)
15421 && !mark_feature_used (&fpu_neon_ext_v1))
15422 {
15423 first_error (_(BAD_FPU));
15424 return FAIL;
15425 }
15426
15427 if ((check & NEON_CHECK_ARCH8)
15428 && !mark_feature_used (&fpu_neon_ext_armv8))
15429 {
15430 first_error (_(BAD_FPU));
15431 return FAIL;
15432 }
15433
15434 return SUCCESS;
15435 }
15436
15437 static void
15438 do_neon_addsub_if_i (void)
15439 {
15440 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
15441 return;
15442
15443 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15444 return;
15445
15446 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15447 affected if we specify unsigned args. */
15448 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
15449 }
15450
15451 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15452 result to be:
15453 V<op> A,B (A is operand 0, B is operand 2)
15454 to mean:
15455 V<op> A,B,A
15456 not:
15457 V<op> A,B,B
15458 so handle that case specially. */
15459
static void
neon_exchange_operands (void)
{
  if (inst.operands[1].present)
    {
      /* NOTE(review): the swap goes through a heap-allocated scratch buffer
	 rather than a local struct copy -- presumably to satisfy a
	 compiler/sanitizer diagnostic; behavior is a plain swap.  Confirm
	 before simplifying.  */
      void *scratch = xmalloc (sizeof (inst.operands[0]));

      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
      free (scratch);
    }
  else
    {
      /* Operand 1 was omitted: reuse the destination as the final source,
	 so V<op> A,B means V<op> A,B,A (see the comment above).  */
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
15479
/* Encode a Neon compare.  Register forms use the REGTYPES mask, swapping
   the source operands first when INVERT is set (so a comparison can be
   expressed via its swapped-operand counterpart).  Non-register forms
   (compare against an immediate) use the IMMTYPES mask.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the float variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15507
/* Compare with operands in mnemonic order (no swap needed).  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15513
/* Compare encoded via the swapped-operand counterpart (INVERT set).  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15519
/* Equality compare: integer and float 32-bit types for both the register
   and immediate forms.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15525
15526 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15527 scalars, which are encoded in 5 bits, M : Rm.
15528 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15529 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15530 index in M.
15531
15532 Dot Product instructions are similar to multiply instructions except elsize
15533 should always be 32.
15534
15535 This function translates SCALAR, which is GAS's internal encoding of indexed
15536 scalar register, to raw encoding. There is also register and index range
15537 check based on ELSIZE. */
15538
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other element size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15564
15565 /* Encode multiply / multiply-accumulate scalar instructions. */
15566
/* Encode a multiply / multiply-accumulate whose third operand is an indexed
   scalar (translated to its raw M:Rm form by neon_scalar_for_mul).  ET is
   the resolved element type; UBIT selects the U/Q bit at position 24.  */
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type. */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 distinguishes the float variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15589
/* Encode VMLA/VMLS, preferring the plain VFP multiply-accumulate when the
   operands allow it; otherwise use the Neon by-scalar form when operand 2
   is a scalar, or the three-register form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15614
15615 static void
15616 do_neon_fmac (void)
15617 {
15618 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15619 return;
15620
15621 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15622 return;
15623
15624 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15625 }
15626
15627 static void
15628 do_neon_tst (void)
15629 {
15630 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15631 struct neon_type_el et = neon_check_type (3, rs,
15632 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15633 neon_three_same (neon_quad (rs), 0, et.size);
15634 }
15635
15636 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15637 same types as the MAC equivalents. The polynomial type for this instruction
15638 is encoded the same as the integer type. */
15639
15640 static void
15641 do_neon_mul (void)
15642 {
15643 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15644 return;
15645
15646 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15647 return;
15648
15649 if (inst.operands[2].isscalar)
15650 do_neon_mac_maybe_scalar ();
15651 else
15652 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15653 }
15654
15655 static void
15656 do_neon_qdmulh (void)
15657 {
15658 if (inst.operands[2].isscalar)
15659 {
15660 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15661 struct neon_type_el et = neon_check_type (3, rs,
15662 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15663 NEON_ENCODE (SCALAR, inst);
15664 neon_mul_mac (et, neon_quad (rs));
15665 }
15666 else
15667 {
15668 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15669 struct neon_type_el et = neon_check_type (3, rs,
15670 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15671 NEON_ENCODE (INTEGER, inst);
15672 /* The U bit (rounding) comes from bit mask. */
15673 neon_three_same (neon_quad (rs), 0, et.size);
15674 }
15675 }
15676
/* Encode VQRDMLAH/VQRDMLSH.  These require the ARMv8.1 AdvSIMD extension;
   if only base ARMv8 AdvSIMD is enabled, accept with a warning and record
   the extra feature use.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form; encoded like the other by-scalar multiplies.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15708
15709 static void
15710 do_neon_fcmp_absolute (void)
15711 {
15712 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15713 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15714 N_F_16_32 | N_KEY);
15715 /* Size field comes from bit mask. */
15716 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15717 }
15718
/* Encode VACLE/VACLT by swapping the source operands and emitting the
   corresponding VACGE/VACGT.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15725
15726 static void
15727 do_neon_step (void)
15728 {
15729 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15730 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15731 N_F_16_32 | N_KEY);
15732 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15733 }
15734
/* Encode VABS/VNEG, using the VFP encoding when the operands allow it,
   otherwise the Neon two-register form (S32 or F16/F32 elements).  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  /* Q bit selects quad operation; F bit (10) the float variant.  */
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15760
15761 static void
15762 do_neon_sli (void)
15763 {
15764 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15765 struct neon_type_el et = neon_check_type (2, rs,
15766 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15767 int imm = inst.operands[2].imm;
15768 constraint (imm < 0 || (unsigned)imm >= et.size,
15769 _("immediate out of range for insert"));
15770 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15771 }
15772
15773 static void
15774 do_neon_sri (void)
15775 {
15776 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15777 struct neon_type_el et = neon_check_type (2, rs,
15778 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15779 int imm = inst.operands[2].imm;
15780 constraint (imm < 1 || (unsigned)imm > et.size,
15781 _("immediate out of range for insert"));
15782 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15783 }
15784
/* Encode VQSHLU (saturating shift left, unsigned result from signed
   source) with an immediate shift in [0, element size - 1].  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15801
15802 static void
15803 do_neon_qmovn (void)
15804 {
15805 struct neon_type_el et = neon_check_type (2, NS_DQ,
15806 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15807 /* Saturating move where operands can be signed or unsigned, and the
15808 destination has the same signedness. */
15809 NEON_ENCODE (INTEGER, inst);
15810 if (et.type == NT_unsigned)
15811 inst.instruction |= 0xc0;
15812 else
15813 inst.instruction |= 0x80;
15814 neon_two_same (0, 1, et.size / 2);
15815 }
15816
15817 static void
15818 do_neon_qmovun (void)
15819 {
15820 struct neon_type_el et = neon_check_type (2, NS_DQ,
15821 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15822 /* Saturating move with unsigned results. Operands must be signed. */
15823 NEON_ENCODE (INTEGER, inst);
15824 neon_two_same (0, 1, et.size / 2);
15825 }
15826
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   shift amount is emitted as the synonymous VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts are encoded as size - shift amount.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15853
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed source).  A zero shift amount is emitted as the
   synonymous VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15883
15884 static void
15885 do_neon_movn (void)
15886 {
15887 struct neon_type_el et = neon_check_type (2, NS_DQ,
15888 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15889 NEON_ENCODE (INTEGER, inst);
15890 neon_two_same (0, 1, et.size / 2);
15891 }
15892
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift amount is
   emitted as the synonymous VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as size - shift amount.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15917
/* Encode VSHLL (shift left long).  A shift equal to the element size uses
   the dedicated "maximum shift" encoding; any other amount uses the
   immediate-shift form with a stricter type check.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15947
15948 /* Check the various types for the VCVT instruction, and return which version
15949 the current instruction is. */
15950
/* X-macro table of conversion variants:
   CVT_VAR (suffix, dest-type, src-type, regtype-flags,
	    bitshift-mnemonic, plain-mnemonic, round-to-zero-mnemonic).
   The identifiers "whole_reg" and "key" are resolved in the context where
   CVT_FLAVOUR_VAR is instantiated (see get_neon_cvt_flavour).  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand each table row to an enumerator name.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15999
/* Determine which conversion variant (see CVT_FLAVOUR_VAR) matches the
   current instruction's operand types for shape RS.  Returns
   neon_cvt_flavour_invalid if none matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Try each variant in table order; the first type match wins.  */
  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
16025
/* Rounding behaviours for the VCVT family.  Modes a/n/p/m select an
   explicit rounding direction (used by the VCVTA/VCVTN/VCVTP/VCVTM
   handlers below); z is used by plain VCVT, x by VCVTR.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
16036
16037 /* Neon-syntax VFP conversions. */
16038
16039 static void
16040 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
16041 {
16042 const char *opname = 0;
16043
16044 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
16045 || rs == NS_FHI || rs == NS_HFI)
16046 {
16047 /* Conversions with immediate bitshift. */
16048 const char *enc[] =
16049 {
16050 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16051 CVT_FLAVOUR_VAR
16052 NULL
16053 #undef CVT_VAR
16054 };
16055
16056 if (flavour < (int) ARRAY_SIZE (enc))
16057 {
16058 opname = enc[flavour];
16059 constraint (inst.operands[0].reg != inst.operands[1].reg,
16060 _("operands 0 and 1 must be the same register"));
16061 inst.operands[1] = inst.operands[2];
16062 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
16063 }
16064 }
16065 else
16066 {
16067 /* Conversions without bitshift. */
16068 const char *enc[] =
16069 {
16070 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16071 CVT_FLAVOUR_VAR
16072 NULL
16073 #undef CVT_VAR
16074 };
16075
16076 if (flavour < (int) ARRAY_SIZE (enc))
16077 opname = enc[flavour];
16078 }
16079
16080 if (opname)
16081 do_vfp_nsyn_opcode (opname);
16082
16083 /* ARMv8.2 fp16 VCVT instruction. */
16084 if (flavour == neon_cvt_flavour_s32_f16
16085 || flavour == neon_cvt_flavour_u32_f16
16086 || flavour == neon_cvt_flavour_f16_u32
16087 || flavour == neon_cvt_flavour_f16_s32)
16088 do_scalar_fp16_v82_encode ();
16089 }
16090
/* Emit the round-towards-zero VFP form of VCVT (legacy "...z" mnemonics)
   for the flavour matched from the current operands.  Flavours with no
   round-to-zero mnemonic in the table emit nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
16107
/* Encode the FPv8 (ARMv8 VFP) directed-rounding VCVT{A,N,P,M}
   float-to-integer forms.  FLAVOUR gives the type pair; MODE the rounding
   direction.  Only modes a/n/p/m are valid here.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz selects double-precision source; op selects signed conversion.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
16183
/* Common worker for all VCVT variants.  Selects between the legacy VFP
   encodings, the FPv8 directed-rounding encodings, the Neon fixed-point
   (immediate) forms, the Neon integer forms and the Advanced SIMD
   half-precision widen/narrow forms, based on the matched shape and
   flavour.  MODE is the requested rounding behaviour.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (immediate fraction bits).  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 is already set just above, so this OR is
	       redundant (harmless); kept for byte-identical behavior.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision fixed-point variant.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon directed-rounding VCVT{A,N,P,M}.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Neon integer conversion.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F16.F32 (narrow) vs VCVT.F32.F16 (widen).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
16377
/* VCVTR: convert using the rounding mode currently in effect.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* VCVT: plain conversion, round towards zero.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA: round to nearest with ties away from zero.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN: round to nearest.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP: round towards plus infinity.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM: round towards minus infinity.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
16413
/* Encode one VCVTB/VCVTT form.  T selects the top (VCVTT) rather than
   bottom (VCVTB) half; TO means converting to half precision; IS_DOUBLE
   selects the double-precision variant (which requires FP-ARMv8).  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* A D register is the wide operand: destination when widening from f16,
     source when narrowing to f16.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
16429
/* Shared worker for VCVTB (T = FALSE) and VCVTT (T = TRUE).  Determines
   the direction (to/from half precision) and precision of the wide operand
   from the operand types, then defers to do_neon_cvttb_2.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No type combination matched; neon_check_type has set the error.  */
    return;
}
16471
/* VCVTB: convert the bottom half of the operand.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: convert the top half of the operand.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16484
/* Encode the immediate form of VMOV/VMVN.  The 64-bit immediate is held
   in operand 1 as imm (low half) plus reg (high half) when regisimm is
   set.  If the value cannot be encoded directly, retry with the bitwise
   inverse and the opposite of VMOV/VMVN.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the possibly flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16536
/* Encode VMVN: bitwise NOT of a register, or a move of an inverted
   immediate when operand 1 is not a register.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
16559
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |

   ET is the checked element type; SIZE sets the size field (as log2).
   The U bit is set for unsigned element types.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16579
16580 static void
16581 do_neon_dyadic_long (void)
16582 {
16583 /* FIXME: Type checking for lengthening op. */
16584 struct neon_type_el et = neon_check_type (3, NS_QDD,
16585 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
16586 neon_mixed_length (et, et.size);
16587 }
16588
16589 static void
16590 do_neon_abal (void)
16591 {
16592 struct neon_type_el et = neon_check_type (3, NS_QDD,
16593 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
16594 neon_mixed_length (et, et.size);
16595 }
16596
16597 static void
16598 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16599 {
16600 if (inst.operands[2].isscalar)
16601 {
16602 struct neon_type_el et = neon_check_type (3, NS_QDS,
16603 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16604 NEON_ENCODE (SCALAR, inst);
16605 neon_mul_mac (et, et.type == NT_unsigned);
16606 }
16607 else
16608 {
16609 struct neon_type_el et = neon_check_type (3, NS_QDD,
16610 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16611 NEON_ENCODE (INTEGER, inst);
16612 neon_mixed_length (et, et.size);
16613 }
16614 }
16615
16616 static void
16617 do_neon_mac_maybe_scalar_long (void)
16618 {
16619 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
16620 }
16621
/* Like neon_scalar_for_mul, this function generates an Rm encoding from
   GAS's internal SCALAR representation.  QUAD_P is 1 for the Q format,
   0 for the D format.  On error, reports via first_error and returns 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], index bit 0 in bit 3 and index
	 bit 1 in bit 5.  */
      if (regno <= 7 && elno <= 3)
	return ((regno & 0x7)
		| ((elno & 0x1) << 3)
		| (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: register split across bit 5 (low bit) and bits [2:0]
	 (upper bits), index in bit 3.  */
      if (regno <= 15 && elno <= 1)
	return (((regno & 0x1) << 5)
		| ((regno >> 1) & 0x7)
		| ((elno & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16654
/* Encode VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1): half-precision
   multiply-accumulate long (FP16 FML extension), in either three-same
   D/Q register form or scalar-indexed form.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  The
     'size' field (bits[21:20]) has a different meaning.  For the scalar index
     variant, it's used to differentiate add and subtract, otherwise it's
     with fixed value 0x2.  */
  int size = -1;

  /* These are unconditional instructions; a condition can only arrive
     here from a Thumb IT block, which we can't reject, only warn about.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      /* Scalar-indexed form: high byte 0xfe; add/sub distinguished via
	 the size argument (16) rather than an opcode bit.  */
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      /* Register form: high byte 0xfc; subtract selected by bit 23.  */
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Replace the Rm field with the packed scalar encoding.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well: in D form the register halves swap places
	     relative to what neon_three_same emitted.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16736
static void
do_neon_vfmal (void)
{
  /* VFMAL: the "add" flavour of the FP16 multiply-accumulate long.  */
  do_neon_fmac_maybe_scalar_long (0);
}
16742
static void
do_neon_vfmsl (void)
{
  /* VFMSL: the "subtract" flavour of the FP16 multiply-accumulate long.  */
  do_neon_fmac_maybe_scalar_long (1);
}
16748
16749 static void
16750 do_neon_dyadic_wide (void)
16751 {
16752 struct neon_type_el et = neon_check_type (3, NS_QQD,
16753 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
16754 neon_mixed_length (et, et.size);
16755 }
16756
16757 static void
16758 do_neon_dyadic_narrow (void)
16759 {
16760 struct neon_type_el et = neon_check_type (3, NS_QDD,
16761 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
16762 /* Operand sign is unimportant, and the U bit is part of the opcode,
16763 so force the operand type to integer. */
16764 et.type = NT_integer;
16765 neon_mixed_length (et, et.size / 2);
16766 }
16767
16768 static void
16769 do_neon_mul_sat_scalar_long (void)
16770 {
16771 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
16772 }
16773
16774 static void
16775 do_neon_vmull (void)
16776 {
16777 if (inst.operands[2].isscalar)
16778 do_neon_mac_maybe_scalar_long ();
16779 else
16780 {
16781 struct neon_type_el et = neon_check_type (3, NS_QDD,
16782 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
16783
16784 if (et.type == NT_poly)
16785 NEON_ENCODE (POLY, inst);
16786 else
16787 NEON_ENCODE (INTEGER, inst);
16788
16789 /* For polynomial encoding the U bit must be zero, and the size must
16790 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16791 obviously, as 0b10). */
16792 if (et.size == 64)
16793 {
16794 /* Check we're on the correct architecture. */
16795 if (!mark_feature_used (&fpu_crypto_ext_armv8))
16796 inst.error =
16797 _("Instruction form not available on this architecture.");
16798
16799 et.size = 32;
16800 }
16801
16802 neon_mixed_length (et, et.size);
16803 }
16804 }
16805
16806 static void
16807 do_neon_ext (void)
16808 {
16809 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16810 struct neon_type_el et = neon_check_type (3, rs,
16811 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16812 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16813
16814 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16815 _("shift out of range"));
16816 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16817 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16818 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16819 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16820 inst.instruction |= LOW4 (inst.operands[2].reg);
16821 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16822 inst.instruction |= neon_quad (rs) << 6;
16823 inst.instruction |= imm << 8;
16824
16825 neon_dp_fixup (&inst);
16826 }
16827
16828 static void
16829 do_neon_rev (void)
16830 {
16831 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16832 struct neon_type_el et = neon_check_type (2, rs,
16833 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16834 unsigned op = (inst.instruction >> 7) & 3;
16835 /* N (width of reversed regions) is encoded as part of the bitmask. We
16836 extract it here to check the elements to be reversed are smaller.
16837 Otherwise we'd get a reserved instruction. */
16838 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16839 gas_assert (elsize != 0);
16840 constraint (et.size >= elsize,
16841 _("elements must be smaller than reversal region"));
16842 neon_two_same (neon_quad (rs), 1, et.size);
16843 }
16844
/* Encode VDUP: either duplicate one scalar lane of a D register, or
   duplicate an ARM core register, across all lanes of a vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Lane index and size marker share the imm4 field starting at
	 bit 16: the (shifted) index sits above the size bit.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size selected by bit 22 (8-bit) / bit 5 (16-bit);
	 32-bit leaves both clear.  */
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16895
16896 /* VMOV has particularly many variations. It can be one of:
16897 0. VMOV<c><q> <Qd>, <Qm>
16898 1. VMOV<c><q> <Dd>, <Dm>
16899 (Register operations, which are VORR with Rm = Rn.)
16900 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16901 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16902 (Immediate loads.)
16903 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16904 (ARM register to scalar.)
16905 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16906 (Two ARM registers to vector.)
16907 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16908 (Scalar to ARM register.)
16909 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16910 (Vector to two ARM registers.)
16911 8. VMOV.F32 <Sd>, <Sm>
16912 9. VMOV.F64 <Dd>, <Dm>
16913 (VFP register moves.)
16914 10. VMOV.F32 <Sd>, #imm
16915 11. VMOV.F64 <Dd>, #imm
16916 (VFP float immediate load.)
16917 12. VMOV <Rd>, <Sm>
16918 (VFP single to ARM reg.)
16919 13. VMOV <Sd>, <Rm>
16920 (ARM reg to VFP single.)
16921 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16922 (Two ARM regs to two VFP singles.)
16923 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16924 (Two VFP singles to two ARM regs.)
16925
16926 These cases can be disambiguated using neon_select_shape, except cases 1/9
16927 and 3/11 which depend on the operand type too.
16928
16929 All the encoded bits are hardcoded by this function.
16930
16931 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16932 Cases 5, 7 may be used with VFPv2 and above.
16933
16934 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16935 can specify a type where it doesn't make sense to, and is ignored). */
16936
static void
do_neon_mov (void)
{
  /* Dispatch on operand shape to one of the many VMOV variants listed in
     the comment above; cases 1/9 and 3/11 additionally need the operand
     type to disambiguate.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VMOV.F64 is a VFP register copy.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* Source register is encoded both as Rm and Rn, making the VORR
	   a plain copy.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* As for NS_DD: absence of a type is not an error here.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* 8/16-bit transfers need full NEON; plain VFP handles only .32.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base opc1/opc2 pattern for the chosen element size; the lane
	   index is merged in below, shifted by the element's log-size.  */
	switch (et.size)
	  {
	  case 8: bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Split bcdebits over bits [6:5] and [22:21].  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Bit 0x10 distinguishes unsigned from signed element types for
	   the narrow transfers.  */
	switch (et.size)
	  {
	  case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
17204
17205 static void
17206 do_neon_rshift_round_imm (void)
17207 {
17208 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
17209 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
17210 int imm = inst.operands[2].imm;
17211
17212 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17213 if (imm == 0)
17214 {
17215 inst.operands[2].present = 0;
17216 do_neon_mov ();
17217 return;
17218 }
17219
17220 constraint (imm < 1 || (unsigned)imm > et.size,
17221 _("immediate out of range for shift"));
17222 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
17223 et.size - imm);
17224 }
17225
17226 static void
17227 do_neon_movhf (void)
17228 {
17229 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
17230 constraint (rs != NS_HH, _("invalid suffix"));
17231
17232 if (inst.cond != COND_ALWAYS)
17233 {
17234 if (thumb_mode)
17235 {
17236 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17237 " the behaviour is UNPREDICTABLE"));
17238 }
17239 else
17240 {
17241 inst.error = BAD_COND;
17242 return;
17243 }
17244 }
17245
17246 do_vfp_sp_monadic ();
17247
17248 inst.is_neon = 1;
17249 inst.instruction |= 0xf0000000;
17250 }
17251
17252 static void
17253 do_neon_movl (void)
17254 {
17255 struct neon_type_el et = neon_check_type (2, NS_QD,
17256 N_EQK | N_DBL, N_SU_32 | N_KEY);
17257 unsigned sizebits = et.size >> 3;
17258 inst.instruction |= sizebits << 19;
17259 neon_two_same (0, et.type == NT_unsigned, -1);
17260 }
17261
17262 static void
17263 do_neon_trn (void)
17264 {
17265 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17266 struct neon_type_el et = neon_check_type (2, rs,
17267 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17268 NEON_ENCODE (INTEGER, inst);
17269 neon_two_same (neon_quad (rs), 1, et.size);
17270 }
17271
17272 static void
17273 do_neon_zip_uzp (void)
17274 {
17275 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17276 struct neon_type_el et = neon_check_type (2, rs,
17277 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17278 if (rs == NS_DD && et.size == 32)
17279 {
17280 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17281 inst.instruction = N_MNEM_vtrn;
17282 do_neon_trn ();
17283 return;
17284 }
17285 neon_two_same (neon_quad (rs), 1, et.size);
17286 }
17287
17288 static void
17289 do_neon_sat_abs_neg (void)
17290 {
17291 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17292 struct neon_type_el et = neon_check_type (2, rs,
17293 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17294 neon_two_same (neon_quad (rs), 1, et.size);
17295 }
17296
17297 static void
17298 do_neon_pair_long (void)
17299 {
17300 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17301 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
17302 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17303 inst.instruction |= (et.type == NT_unsigned) << 7;
17304 neon_two_same (neon_quad (rs), 1, et.size);
17305 }
17306
17307 static void
17308 do_neon_recip_est (void)
17309 {
17310 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17311 struct neon_type_el et = neon_check_type (2, rs,
17312 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
17313 inst.instruction |= (et.type == NT_float) << 8;
17314 neon_two_same (neon_quad (rs), 1, et.size);
17315 }
17316
17317 static void
17318 do_neon_cls (void)
17319 {
17320 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17321 struct neon_type_el et = neon_check_type (2, rs,
17322 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17323 neon_two_same (neon_quad (rs), 1, et.size);
17324 }
17325
17326 static void
17327 do_neon_clz (void)
17328 {
17329 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17330 struct neon_type_el et = neon_check_type (2, rs,
17331 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
17332 neon_two_same (neon_quad (rs), 1, et.size);
17333 }
17334
17335 static void
17336 do_neon_cnt (void)
17337 {
17338 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17339 struct neon_type_el et = neon_check_type (2, rs,
17340 N_EQK | N_INT, N_8 | N_KEY);
17341 neon_two_same (neon_quad (rs), 1, et.size);
17342 }
17343
17344 static void
17345 do_neon_swp (void)
17346 {
17347 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17348 neon_two_same (neon_quad (rs), 1, -1);
17349 }
17350
17351 static void
17352 do_neon_tbl_tbx (void)
17353 {
17354 unsigned listlenbits;
17355 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
17356
17357 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
17358 {
17359 first_error (_("bad list length for table lookup"));
17360 return;
17361 }
17362
17363 listlenbits = inst.operands[1].imm - 1;
17364 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17365 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17366 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17367 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17368 inst.instruction |= LOW4 (inst.operands[2].reg);
17369 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
17370 inst.instruction |= listlenbits << 8;
17371
17372 neon_dp_fixup (&inst);
17373 }
17374
17375 static void
17376 do_neon_ldm_stm (void)
17377 {
17378 /* P, U and L bits are part of bitmask. */
17379 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
17380 unsigned offsetbits = inst.operands[1].imm * 2;
17381
17382 if (inst.operands[1].issingle)
17383 {
17384 do_vfp_nsyn_ldm_stm (is_dbmode);
17385 return;
17386 }
17387
17388 constraint (is_dbmode && !inst.operands[0].writeback,
17389 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17390
17391 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
17392 _("register list must contain at least 1 and at most 16 "
17393 "registers"));
17394
17395 inst.instruction |= inst.operands[0].reg << 16;
17396 inst.instruction |= inst.operands[0].writeback << 21;
17397 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
17398 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
17399
17400 inst.instruction |= offsetbits;
17401
17402 do_vfp_cond_or_thumb ();
17403 }
17404
17405 static void
17406 do_neon_ldr_str (void)
17407 {
17408 int is_ldr = (inst.instruction & (1 << 20)) != 0;
17409
17410 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17411 And is UNPREDICTABLE in thumb mode. */
17412 if (!is_ldr
17413 && inst.operands[1].reg == REG_PC
17414 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
17415 {
17416 if (thumb_mode)
17417 inst.error = _("Use of PC here is UNPREDICTABLE");
17418 else if (warn_on_deprecated)
17419 as_tsktsk (_("Use of PC here is deprecated"));
17420 }
17421
17422 if (inst.operands[0].issingle)
17423 {
17424 if (is_ldr)
17425 do_vfp_nsyn_opcode ("flds");
17426 else
17427 do_vfp_nsyn_opcode ("fsts");
17428
17429 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17430 if (inst.vectype.el[0].size == 16)
17431 do_scalar_fp16_v82_encode ();
17432 }
17433 else
17434 {
17435 if (is_ldr)
17436 do_vfp_nsyn_opcode ("fldd");
17437 else
17438 do_vfp_nsyn_opcode ("fstd");
17439 }
17440 }
17441
/* Encode the Thumb-2 VLDR/VSTR (System Register) form; gated on
   ARMv8.1-M Mainline by the caller (do_vldr_vstr).  */

static void
do_t_vldr_vstr_sysreg (void)
{
  /* The L (load) bit happens to sit at the same position (20) in the FP
     template we arrive with and in the sysreg encoding we build.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  /* Only [Rn {, #imm}] addressing is accepted: no register offset...  */
  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  /* ...and no literal (PC-relative) form.  */
  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* NOTE(review): offset magnitude is limited to 7 bits here; presumably
     encode_arm_cp_address applies the word scaling — confirm against the
     BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM handling.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* The system register number is split: low three bits into [15:13],
     bit 3 into bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
17468
17469 static void
17470 do_vldr_vstr (void)
17471 {
17472 bfd_boolean sysreg_op = !inst.operands[0].isreg;
17473
17474 /* VLDR/VSTR (System Register). */
17475 if (sysreg_op)
17476 {
17477 if (!mark_feature_used (&arm_ext_v8_1m_main))
17478 as_bad (_("Instruction not permitted on this architecture"));
17479
17480 do_t_vldr_vstr_sysreg ();
17481 }
17482 /* VLDR/VSTR. */
17483 else
17484 {
17485 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
17486 as_bad (_("Instruction not permitted on this architecture"));
17487 do_neon_ldr_str ();
17488 }
17489 }
17490
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment (in bits) lives in the upper bits of the imm field.
     Only 64/128/256 are encodable, and 128/256 only with list lengths
     large enough to fill that many bits.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  /* Replace the placeholder type field with the looked-up value.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17559
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGNMENT is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */
17564
17565 static int
17566 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17567 {
17568 va_list ap;
17569 int result = FAIL, thissize, thisalign;
17570
17571 if (!inst.operands[1].immisalign)
17572 {
17573 *do_alignment = 0;
17574 return SUCCESS;
17575 }
17576
17577 va_start (ap, do_alignment);
17578
17579 do
17580 {
17581 thissize = va_arg (ap, int);
17582 if (thissize == -1)
17583 break;
17584 thisalign = va_arg (ap, int);
17585
17586 if (size == thissize && align == thisalign)
17587 result = SUCCESS;
17588 }
17589 while (result != SUCCESS);
17590
17591 va_end (ap);
17592
17593 if (result == SUCCESS)
17594 *do_alignment = 1;
17595 else
17596 first_error (_("unsupported alignment for instruction"));
17597
17598 return result;
17599 }
17600
/* Encode single n-element structure to one lane VLD<n>/VST<n>
   instructions.  operands[0] is the parsed register list (including the
   lane index), operands[1] the address operand.  */

static void
do_neon_ld_st_lane (void)
{
  /* Lane forms only accept 8-, 16- or 32-bit elements.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment, if any, is held in the upper bits of the parsed immediate.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one was stashed in bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes that fit in one 64-bit D register.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    /* 32-bit elements distinguish 64- and 128-bit alignment.  */
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17685
17686 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17687
static void
do_neon_ld_dup (void)
{
  /* All-lanes forms only accept 8-, 16- or 32-bit elements.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* One or two destination registers; bit 5 selects two.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements at 128-bit alignment use a special size code.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment flag is bit 4.  */
  inst.instruction |= do_alignment << 4;
}
17760
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17763
17764 static void
17765 do_neon_ldx_stx (void)
17766 {
17767 if (inst.operands[1].isreg)
17768 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17769
17770 switch (NEON_LANE (inst.operands[0].imm))
17771 {
17772 case NEON_INTERLEAVE_LANES:
17773 NEON_ENCODE (INTERLV, inst);
17774 do_neon_ld_st_interleave ();
17775 break;
17776
17777 case NEON_ALL_LANES:
17778 NEON_ENCODE (DUP, inst);
17779 if (inst.instruction == N_INV)
17780 {
17781 first_error ("only loads support such operands");
17782 break;
17783 }
17784 do_neon_ld_dup ();
17785 break;
17786
17787 default:
17788 NEON_ENCODE (LANE, inst);
17789 do_neon_ld_st_lane ();
17790 }
17791
17792 /* L bit comes from bit mask. */
17793 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17794 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17795 inst.instruction |= inst.operands[1].reg << 16;
17796
17797 if (inst.operands[1].postind)
17798 {
17799 int postreg = inst.operands[1].imm & 0xf;
17800 constraint (!inst.operands[1].immisreg,
17801 _("post-index must be a register"));
17802 constraint (postreg == 0xd || postreg == 0xf,
17803 _("bad register for post-index"));
17804 inst.instruction |= postreg;
17805 }
17806 else
17807 {
17808 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17809 constraint (inst.relocs[0].exp.X_op != O_constant
17810 || inst.relocs[0].exp.X_add_number != 0,
17811 BAD_ADDR_MODE);
17812
17813 if (inst.operands[1].writeback)
17814 {
17815 inst.instruction |= 0xd;
17816 }
17817 else
17818 inst.instruction |= 0xf;
17819 }
17820
17821 if (thumb_mode)
17822 inst.instruction |= 0xf9000000;
17823 else
17824 inst.instruction |= 0xf4000000;
17825 }
17826
17827 /* FP v8. */
17828 static void
17829 do_vfp_nsyn_fpv8 (enum neon_shape rs)
17830 {
17831 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17832 D register operands. */
17833 if (neon_shape_class[rs] == SC_DOUBLE)
17834 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17835 _(BAD_FPU));
17836
17837 NEON_ENCODE (FPV8, inst);
17838
17839 if (rs == NS_FFF || rs == NS_HHH)
17840 {
17841 do_vfp_sp_dyadic ();
17842
17843 /* ARMv8.2 fp16 instruction. */
17844 if (rs == NS_HHH)
17845 do_scalar_fp16_v82_encode ();
17846 }
17847 else
17848 do_vfp_dp_rd_rn_rm ();
17849
17850 if (rs == NS_DDD)
17851 inst.instruction |= 0x100;
17852
17853 inst.instruction |= 0xf0000000;
17854 }
17855
17856 static void
17857 do_vsel (void)
17858 {
17859 set_it_insn_type (OUTSIDE_IT_INSN);
17860
17861 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17862 first_error (_("invalid instruction shape"));
17863 }
17864
17865 static void
17866 do_vmaxnm (void)
17867 {
17868 set_it_insn_type (OUTSIDE_IT_INSN);
17869
17870 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17871 return;
17872
17873 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17874 return;
17875
17876 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17877 }
17878
/* Encode a VRINT-family instruction with rounding mode MODE.  The VFP
   (scalar) encodings are tried first; if the type check fails, the Neon
   (vector) encodings are attempted instead.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Fold in the encoding bits for the requested rounding mode.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 distinguishes the double-precision shape.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7]; mode 'r' has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17970
/* The VRINT{X,Z,R,A,N,P,M} mnemonics all dispatch to do_vrint_1 with
   the corresponding rounding-mode selector.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
18012
/* Validate and encode the scalar operand OPND of a VCMLA for element
   size ELSIZE (16 or 32).  Returns the encoded register/index value, or
   reports an error and returns 0.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  switch (elsize)
    {
    case 16:
      /* fp16 scalars: two lanes, registers D0-D15; index is bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
      break;

    case 32:
      /* fp32 scalars: only lane 0 is legal.  */
      if (idx == 0)
	return reg;
      break;

    default:
      break;
    }

  first_error (_("scalar out of range"));
  return 0;
}
18027
/* Encode VCMLA (complex multiply-accumulate with rotation) in both its
   three-same and indexed-scalar forms.  The rotation immediate has been
   parsed into relocs[0] and must be 0, 90, 180 or 270.  */

static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as a 2-bit multiple of 90 degrees.  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-same form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
18069
/* Encode VCADD (complex add with rotation).  The rotation immediate has
   been parsed into relocs[0] and must be 90 or 270.  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  /* Bit 24 selects the 270-degree rotation, bit 20 the 32-bit size.  */
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
18088
18089 /* Dot Product instructions encoding support. */
18090
/* Encode a Dot Product instruction.  UNSIGNED_P non-zero selects the
   unsigned variant: N_U8 element checking and the 'U' bit set.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
18145
18146 /* Dot Product instructions for signed integer. */
18147
static void
do_neon_dotproduct_s (void)
{
  /* Fix: a void function must not return the value of a void call
     (C99 6.8.6.4); call the worker plainly instead.  */
  do_neon_dotproduct (0);
}
18153
18154 /* Dot Product instructions for unsigned integer. */
18155
static void
do_neon_dotproduct_u (void)
{
  /* Fix: a void function must not return the value of a void call
     (C99 6.8.6.4); call the worker plainly instead.  */
  do_neon_dotproduct (1);
}
18161
18162 /* Crypto v1 instructions. */
18163 static void
18164 do_crypto_2op_1 (unsigned elttype, int op)
18165 {
18166 set_it_insn_type (OUTSIDE_IT_INSN);
18167
18168 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
18169 == NT_invtype)
18170 return;
18171
18172 inst.error = NULL;
18173
18174 NEON_ENCODE (INTEGER, inst);
18175 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18176 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18177 inst.instruction |= LOW4 (inst.operands[1].reg);
18178 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
18179 if (op != -1)
18180 inst.instruction |= op << 6;
18181
18182 if (thumb_mode)
18183 inst.instruction |= 0xfc000000;
18184 else
18185 inst.instruction |= 0xf0000000;
18186 }
18187
18188 static void
18189 do_crypto_3op_1 (int u, int op)
18190 {
18191 set_it_insn_type (OUTSIDE_IT_INSN);
18192
18193 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
18194 N_32 | N_UNT | N_KEY).type == NT_invtype)
18195 return;
18196
18197 inst.error = NULL;
18198
18199 NEON_ENCODE (INTEGER, inst);
18200 neon_three_same (1, u, 8 << op);
18201 }
18202
/* The AES mnemonics all use the 8-bit crypto two-operand encoding,
   distinguished only by the op field.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
18226
/* SHA three-operand forms: (u, op) select the variant.  */

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA two-operand forms use 32-bit elements; SHA1H has no op field.  */

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
18286
18287 static void
18288 do_crc32_1 (unsigned int poly, unsigned int sz)
18289 {
18290 unsigned int Rd = inst.operands[0].reg;
18291 unsigned int Rn = inst.operands[1].reg;
18292 unsigned int Rm = inst.operands[2].reg;
18293
18294 set_it_insn_type (OUTSIDE_IT_INSN);
18295 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
18296 inst.instruction |= LOW4 (Rn) << 16;
18297 inst.instruction |= LOW4 (Rm);
18298 inst.instruction |= sz << (thumb_mode ? 4 : 21);
18299 inst.instruction |= poly << (thumb_mode ? 20 : 9);
18300
18301 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
18302 as_warn (UNPRED_REG ("r15"));
18303 }
18304
/* CRC32 wrappers: do_crc32_1 (poly, sz), where the 'c' mnemonics pass
   poly 1 and sz encodes byte/halfword/word as 0/1/2.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
18340
/* Encode VJCVT: a double-to-signed-32-bit convert requiring the ARMv8
   VFP extension.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  /* Operand types are S32 destination, F64 source.  */
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18350
18351 \f
18352 /* Overall per-instruction processing. */
18353
18354 /* We need to be able to fix up arbitrary expressions in some statements.
18355 This is so that we can handle symbols that are an arbitrary distance from
18356 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18357 which returns part of an address in a form which will be valid for
18358 a data instruction. We do this by pushing the expression into a symbol
18359 in the expr_section, and creating a fix for that. */
18360
/* Create a fix-up covering SIZE bytes at offset WHERE in FRAG, for
   expression EXP with relocation RELOC.  PC_REL is non-zero for
   pc-relative fixes.  Constants are first wrapped in an absolute-valued
   symbol; complex expressions get their own expression symbol.  */

static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to reference the new symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything else: push the expression into its own symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
18414
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the fixed-up expression into a symbol/offset pair that can be
     attached to the variant frag.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      /* Complex expressions get their own expression symbol.  */
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
18446
18447 /* Write a 32-bit thumb instruction to buf. */
18448 static void
18449 put_thumb32_insn (char * buf, unsigned long insn)
18450 {
18451 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
18452 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
18453 }
18454
/* Emit the instruction described by the global 'inst' into the current
   frag, together with its fix-ups and DWARF line information.  STR is
   the original source line, used only for error reporting.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions are handled by output_relax_insn.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word form: the same 32-bit word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fix for each pending relocation on this instruction.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
18505
18506 static char *
18507 output_it_inst (int cond, int mask, char * to)
18508 {
18509 unsigned long instruction = 0xbf00;
18510
18511 mask &= 0xf;
18512 instruction |= mask;
18513 instruction |= cond << 4;
18514
18515 if (to == NULL)
18516 {
18517 to = frag_more (2);
18518 #ifdef OBJ_ELF
18519 dwarf2_emit_insn (2);
18520 #endif
18521 }
18522
18523 md_number_to_chars (to, instruction, 2);
18524
18525 return to;
18526 }
18527
/* Tag values used in struct asm_opcode's tag field.  The tag records
   where (if anywhere) a conditional affix may appear in the mnemonic;
   opcode_lookup examines it when decoding conditional variants.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18561
18562 /* Subroutine of md_assemble, responsible for looking up the primary
18563 opcode from the mnemonic the user wrote. STR points to the
18564 beginning of the mnemonic.
18565
18566 This is not simply a hash table lookup, because of conditional
18567 variants. Most instructions have conditional variants, which are
18568 expressed with a _conditional affix_ to the mnemonic. If we were
18569 to encode each conditional variant as a literal string in the opcode
18570 table, it would have approximately 20,000 entries.
18571
18572 Most mnemonics take this affix as a suffix, and in unified syntax,
18573 'most' is upgraded to 'all'. However, in the divided syntax, some
18574 instructions take the affix as an infix, notably the s-variants of
18575 the arithmetic instructions. Of those instructions, all but six
18576 have the infix appear after the third character of the mnemonic.
18577
18578 Accordingly, the algorithm for looking up primary opcodes given
18579 an identifier is:
18580
18581 1. Look up the identifier in the opcode table.
18582 If we find a match, go to step U.
18583
18584 2. Look up the last two characters of the identifier in the
18585 conditions table. If we find a match, look up the first N-2
18586 characters of the identifier in the opcode table. If we
18587 find a match, go to step CE.
18588
18589 3. Look up the fourth and fifth characters of the identifier in
18590 the conditions table. If we find a match, extract those
18591 characters from the identifier, and look up the remaining
18592 characters in the opcode table. If we find a match, go
18593 to step CM.
18594
18595 4. Fail.
18596
18597 U. Examine the tag field of the opcode structure, in case this is
18598 one of the six instructions with its conditional infix in an
18599 unusual place. If it is, the tag tells us where to find the
18600 infix; look it up in the conditions table and set inst.cond
18601 accordingly. Otherwise, this is an unconditional instruction.
18602 Again set inst.cond accordingly. Return the opcode structure.
18603
18604 CE. Examine the tag field to make sure this is an instruction that
18605 should receive a conditional suffix. If it is not, fail.
18606 Otherwise, set inst.cond from the suffix we already looked up,
18607 and return the opcode structure.
18608
18609 CM. Examine the tag field to make sure this is an instruction that
18610 should receive a conditional infix after the third character.
18611 If it is not, fail. Otherwise, undo the edits to the current
18612 line of input and proceed as for case CE. */
18613
/* Find the opcode table entry for the mnemonic at *STR, following the
   algorithm described in the comment above (steps 1-4, U, CE, CM).
   On success, advance *STR past the mnemonic and any width/Neon type
   suffix, set inst.cond (and possibly inst.size_req / inst.vectype),
   and return the opcode structure; return NULL on failure.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      /* Advance past ".w"/".n" if one was consumed, otherwise leave
	 *str at the '.' so a Neon type suffix can start there.  */
      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Not one of the odd-infix instructions: the mnemonic as a
	     whole matched, so it carries no condition.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes where, within the mnemonic, the two-character
	 condition infix sits for the oddly-infixed instructions.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* A conditional suffix is two characters and the base mnemonic must
     be at least one, so anything shorter than three characters cannot
     have a conditional suffix.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily delete the two candidate infix characters from the
     input line so the remainder can be looked up as a mnemonic, then
     restore the line regardless of the outcome.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18770
18771 /* This function generates an initial IT instruction, leaving its block
18772 virtually open for the new instructions. Eventually,
18773 the mask will be updated by now_it_add_mask () each time
18774 a new instruction needs to be included in the IT block.
18775 Finally, the block is closed with close_automatic_it_block ().
18776 The block closure can be requested either from md_assemble (),
18777 a tencode (), or due to a label hook. */
18778
/* Open a new automatically-generated IT block whose first instruction
   has condition COND.  Emits a provisional IT instruction (remembered
   in now_it.insn so now_it_add_mask () can rewrite it in place as the
   block grows).  See the comment above for the block lifecycle.  */
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Seed mask for a block currently holding a single instruction;
     now_it_add_mask () updates it as instructions are appended.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  /* Mark the output as Thumb code before emitting the IT insn.  */
  mapping_state (MAP_THUMB);
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18791
18792 /* Close an automatic IT block.
18793 See comments in new_automatic_it_block (). */
18794
18795 static void
18796 close_automatic_it_block (void)
18797 {
18798 now_it.mask = 0x10;
18799 now_it.block_length = 0;
18800 }
18801
18802 /* Update the mask of the current automatically-generated IT
18803 instruction. See comments in new_automatic_it_block (). */
18804
18805 static void
18806 now_it_add_mask (int cond)
18807 {
18808 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18809 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18810 | ((bitvalue) << (nbit)))
18811 const int resulting_bit = (cond & 1);
18812
18813 now_it.mask &= 0xf;
18814 now_it.mask = SET_BIT_VALUE (now_it.mask,
18815 resulting_bit,
18816 (5 - now_it.block_length));
18817 now_it.mask = SET_BIT_VALUE (now_it.mask,
18818 1,
18819 ((5 - now_it.block_length) - 1) );
18820 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18821
18822 #undef CLEAR_BIT
18823 #undef SET_BIT_VALUE
18824 }
18825
/* The IT blocks handling machinery is accessed through these functions:
18827 it_fsm_pre_encode () from md_assemble ()
18828 set_it_insn_type () optional, from the tencode functions
18829 set_it_insn_type_last () ditto
18830 in_it_block () ditto
18831 it_fsm_post_encode () from md_assemble ()
18832 force_automatic_it_block_close () from label handling functions
18833
18834 Rationale:
18835 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18836 initializing the IT insn type with a generic initial value depending
18837 on the inst.condition.
18838 2) During the tencode function, two things may happen:
18839 a) The tencode function overrides the IT insn type by
18840 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18841 b) The tencode function queries the IT block state by
18842 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18843
18844 Both set_it_insn_type and in_it_block run the internal FSM state
18845 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
18847 and b) querying the state requires the FSM to be updated.
18848 Specifically we want to avoid creating an IT block for conditional
18849 branches, so it_fsm_pre_encode is actually a guess and we can't
18850 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18852 Because of this, if set_it_insn_type and in_it_block have to be used,
18853 set_it_insn_type has to be called first.
18854
18855 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18856 determines the insn IT type depending on the inst.cond code.
18857 When a tencode () routine encodes an instruction that can be
18858 either outside an IT block, or, in the case of being inside, has to be
18859 the last one, set_it_insn_type_last () will determine the proper
18860 IT instruction type based on the inst.cond code. Otherwise,
18861 set_it_insn_type can be called for overriding that logic or
18862 for covering other cases.
18863
18864 Calling handle_it_state () may not transition the IT block state to
18865 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18866 still queried. Instead, if the FSM determines that the state should
18867 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18868 after the tencode () function: that's what it_fsm_post_encode () does.
18869
18870 Since in_it_block () calls the state handling function to get an
18871 updated state, an error may occur (due to invalid insns combination).
18872 In that case, inst.error is set.
18873 Therefore, inst.error has to be checked after the execution of
18874 the tencode () routine.
18875
18876 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18877 any pending state change (if any) that didn't take place in
18878 handle_it_state () as explained above. */
18879
18880 static void
18881 it_fsm_pre_encode (void)
18882 {
18883 if (inst.cond != COND_ALWAYS)
18884 inst.it_insn_type = INSIDE_IT_INSN;
18885 else
18886 inst.it_insn_type = OUTSIDE_IT_INSN;
18887
18888 now_it.state_handled = 0;
18889 }
18890
18891 /* IT state FSM handling function. */
18892
/* IT state FSM handling function.  Advances the IT machinery for the
   instruction currently described by inst, possibly opening, growing
   or closing an automatic IT block.  Returns SUCCESS, or FAIL with
   inst.error set when the instruction is illegal for the current IT
   context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional Thumb insn outside an IT block and
		     implicit IT generation is disabled or unavailable.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction starts a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Case c): close the block; unless this insn had to be
		 the last of a block, start a fresh one for it.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      /* Case a): fold this insn into the open block.  */
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* Case b): a block-terminating insn closes the block.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    /* Unconditional insn inside an explicit IT block.  */
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Suffix does not match the IT slot's condition.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* Must occupy the final slot of the IT block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    /* IT inside an IT block is never legal.  */
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
19054
/* A class of 16-bit Thumb instructions, for the ARMv8 IT-block
   deprecation check: an insn belongs to the class when
   (insn & MASK) == PATTERN.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Significant opcode bits.  */
  unsigned long mask;		/* Which bits to compare against PATTERN.  */
  const char* description;	/* Class name used in the diagnostic.  */
};
19061
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
19076
19077 static void
19078 it_fsm_post_encode (void)
19079 {
19080 int is_last;
19081
19082 if (!now_it.state_handled)
19083 handle_it_state ();
19084
19085 if (now_it.insn_cond
19086 && !now_it.warn_deprecated
19087 && warn_on_deprecated
19088 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
19089 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
19090 {
19091 if (inst.instruction >= 0x10000)
19092 {
19093 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19094 "performance deprecated in ARMv8-A and ARMv8-R"));
19095 now_it.warn_deprecated = TRUE;
19096 }
19097 else
19098 {
19099 const struct depr_insn_mask *p = depr_it_insns;
19100
19101 while (p->mask != 0)
19102 {
19103 if ((inst.instruction & p->mask) == p->pattern)
19104 {
19105 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19106 "instructions of the following class are "
19107 "performance deprecated in ARMv8-A and "
19108 "ARMv8-R: %s"), p->description);
19109 now_it.warn_deprecated = TRUE;
19110 break;
19111 }
19112
19113 ++p;
19114 }
19115 }
19116
19117 if (now_it.block_length > 1)
19118 {
19119 as_tsktsk (_("IT blocks containing more than one conditional "
19120 "instruction are performance deprecated in ARMv8-A and "
19121 "ARMv8-R"));
19122 now_it.warn_deprecated = TRUE;
19123 }
19124 }
19125
19126 is_last = (now_it.mask == 0x10);
19127 if (is_last)
19128 {
19129 now_it.state = OUTSIDE_IT_BLOCK;
19130 now_it.mask = 0;
19131 }
19132 }
19133
19134 static void
19135 force_automatic_it_block_close (void)
19136 {
19137 if (now_it.state == AUTOMATIC_IT_BLOCK)
19138 {
19139 close_automatic_it_block ();
19140 now_it.state = OUTSIDE_IT_BLOCK;
19141 now_it.mask = 0;
19142 }
19143 }
19144
19145 static int
19146 in_it_block (void)
19147 {
19148 if (!now_it.state_handled)
19149 handle_it_state ();
19150
19151 return now_it.state != OUTSIDE_IT_BLOCK;
19152 }
19153
19154 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19155 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19156 here, hence the "known" in the function name. */
19157
19158 static bfd_boolean
19159 known_t32_only_insn (const struct asm_opcode *opcode)
19160 {
19161 /* Original Thumb-1 wide instruction. */
19162 if (opcode->tencode == do_t_blx
19163 || opcode->tencode == do_t_branch23
19164 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
19165 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
19166 return TRUE;
19167
19168 /* Wide-only instruction added to ARMv8-M Baseline. */
19169 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
19170 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
19171 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
19172 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
19173 return TRUE;
19174
19175 return FALSE;
19176 }
19177
19178 /* Whether wide instruction variant can be used if available for a valid OPCODE
19179 in ARCH. */
19180
19181 static bfd_boolean
19182 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
19183 {
19184 if (known_t32_only_insn (opcode))
19185 return TRUE;
19186
19187 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19188 of variant T3 of B.W is checked in do_t_branch. */
19189 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19190 && opcode->tencode == do_t_branch)
19191 return TRUE;
19192
19193 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19194 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19195 && opcode->tencode == do_t_mov_cmp
19196 /* Make sure CMP instruction is not affected. */
19197 && opcode->aencode == do_mov)
19198 return TRUE;
19199
19200 /* Wide instruction variants of all instructions with narrow *and* wide
19201 variants become available with ARMv6t2. Other opcodes are either
19202 narrow-only or wide-only and are thus available if OPCODE is valid. */
19203 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
19204 return TRUE;
19205
19206 /* OPCODE with narrow only instruction variant or wide variant not
19207 available. */
19208 return FALSE;
19209 }
19210
/* Assemble one line of input STR: look up the mnemonic, verify it is
   valid for the selected CPU and mode, parse the operands, run the
   per-insn encoder through the IT FSM, and emit the instruction (or
   record a diagnostic).  Main per-instruction entry point called by
   the generic GAS machinery.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff is the first halfword of a 32-bit insn, so
	     a complete encoding must not land in that range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
19405
19406 static void
19407 check_it_blocks_finished (void)
19408 {
19409 #ifdef OBJ_ELF
19410 asection *sect;
19411
19412 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
19413 if (seg_info (sect)->tc_segment_info_data.current_it.state
19414 == MANUAL_IT_BLOCK)
19415 {
19416 as_warn (_("section '%s' finished with an open IT block."),
19417 sect->name);
19418 }
19419 #else
19420 if (now_it.state == MANUAL_IT_BLOCK)
19421 as_warn (_("file finished with an open IT block."));
19422 #endif
19423 }
19424
19425 /* Various frobbings of labels and their addresses. */
19426
/* Called at the start of each input line: forget the last label seen so
   that label-alignment bookkeeping only applies to the statement
   immediately following a label.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
19432
/* Target hook run when label SYM is defined: record it for later
   alignment, tag it with the current Thumb/interwork state, close any
   automatic IT block, and mark Thumb function entry points.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
19491
19492 bfd_boolean
19493 arm_data_in_code (void)
19494 {
19495 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19496 {
19497 *input_line_pointer = '/';
19498 input_line_pointer += 5;
19499 *input_line_pointer = 0;
19500 return TRUE;
19501 }
19502
19503 return FALSE;
19504 }
19505
19506 char *
19507 arm_canonicalize_symbol_name (char * name)
19508 {
19509 int len;
19510
19511 if (thumb_mode && (len = strlen (name)) > 5
19512 && streq (name + len - 5, "/data"))
19513 *(name + len - 5) = 0;
19514
19515 return name;
19516 }
19517 \f
19518 /* Table of all register names defined by default. The user can
19519 define additional names with .req. Note that all register names
19520 should appear in both upper and lowercase variants. Some registers
19521 also have mixed-case names. */
19522
/* REGDEF(s,n,t): one reg_entry initializer naming register S with
   encoded value N and type REG_TYPE_<t>; the trailing TRUE, 0
   initialize the remaining reg_entry fields.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM(p,n,t): register whose name is prefix P followed by number N,
   e.g. REGNUM(r,5,RN) defines "r5".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2(p,n,t): as REGNUM, but the encoded value is 2*N (used for
   the Q register sets below).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET(p,t): sixteen registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* REGSETH(p,t): the upper sixteen registers P16..P31.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* REGSET2(p,t): sixteen registers P0..P15 with doubled encodings.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* SPLRBANK(base,bank,t): LR, SP and SPSR entries for one banked
   register group, in both lower- and upper-case spellings.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19548
19549 static const struct reg_entry reg_names[] =
19550 {
19551 /* ARM integer registers. */
19552 REGSET(r, RN), REGSET(R, RN),
19553
19554 /* ATPCS synonyms. */
19555 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
19556 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
19557 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
19558
19559 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
19560 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
19561 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
19562
19563 /* Well-known aliases. */
19564 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
19565 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
19566
19567 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
19568 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
19569
19570 /* Coprocessor numbers. */
19571 REGSET(p, CP), REGSET(P, CP),
19572
19573 /* Coprocessor register numbers. The "cr" variants are for backward
19574 compatibility. */
19575 REGSET(c, CN), REGSET(C, CN),
19576 REGSET(cr, CN), REGSET(CR, CN),
19577
19578 /* ARM banked registers. */
19579 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
19580 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
19581 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
19582 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
19583 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
19584 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
19585 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
19586
19587 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
19588 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
19589 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
19590 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
19591 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
19592 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
19593 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
19594 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
19595
19596 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
19597 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
19598 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
19599 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
19600 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
19601 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
19602 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
19603 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
19604 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
19605
19606 /* FPA registers. */
19607 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
19608 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
19609
19610 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
19611 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
19612
19613 /* VFP SP registers. */
19614 REGSET(s,VFS), REGSET(S,VFS),
19615 REGSETH(s,VFS), REGSETH(S,VFS),
19616
19617 /* VFP DP Registers. */
19618 REGSET(d,VFD), REGSET(D,VFD),
19619 /* Extra Neon DP registers. */
19620 REGSETH(d,VFD), REGSETH(D,VFD),
19621
19622 /* Neon QP registers. */
19623 REGSET2(q,NQ), REGSET2(Q,NQ),
19624
19625 /* VFP control registers. */
19626 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
19627 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
19628 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
19629 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
19630 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
19631 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
19632 REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
19633
19634 /* Maverick DSP coprocessor registers. */
19635 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
19636 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
19637
19638 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
19639 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
19640 REGDEF(dspsc,0,DSPSC),
19641
19642 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
19643 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
19644 REGDEF(DSPSC,0,DSPSC),
19645
19646 /* iWMMXt data registers - p0, c0-15. */
19647 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
19648
19649 /* iWMMXt control registers - p1, c0-3. */
19650 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
19651 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
19652 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
19653 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
19654
19655 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19656 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
19657 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
19658 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
19659 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
19660
19661 /* XScale accumulator registers. */
19662 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
19663 };
19664 #undef REGDEF
19665 #undef REGNUM
19666 #undef REGSET
19667
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the flag letters
   f/s/x/c is enumerated explicitly, so a suffix parses regardless of
   the order in which the user writes the letters.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.  */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags.  The value is simply the OR of the named
     field bits; only the spelling differs between permutations.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19746
19747 /* Table of V7M psr names. */
19748 static const struct asm_psr v7m_psrs[] =
19749 {
19750 {"apsr", 0x0 }, {"APSR", 0x0 },
19751 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19752 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19753 {"psr", 0x3 }, {"PSR", 0x3 },
19754 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19755 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19756 {"epsr", 0x6 }, {"EPSR", 0x6 },
19757 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19758 {"msp", 0x8 }, {"MSP", 0x8 },
19759 {"psp", 0x9 }, {"PSP", 0x9 },
19760 {"msplim", 0xa }, {"MSPLIM", 0xa },
19761 {"psplim", 0xb }, {"PSPLIM", 0xb },
19762 {"primask", 0x10}, {"PRIMASK", 0x10},
19763 {"basepri", 0x11}, {"BASEPRI", 0x11},
19764 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19765 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19766 {"control", 0x14}, {"CONTROL", 0x14},
19767 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19768 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19769 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19770 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19771 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19772 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19773 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19774 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19775 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19776 };
19777
/* Table of all shift-in-operand names.  Each name appears in both
   lower- and upper-case spellings; "asl" is accepted as a synonym
   for "lsl" (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
19788
19789 /* Table of all explicit relocation names. */
19790 #ifdef OBJ_ELF
19791 static struct reloc_entry reloc_names[] =
19792 {
19793 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
19794 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
19795 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
19796 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
19797 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
19798 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
19799 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
19800 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
19801 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
19802 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
19803 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
19804 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
19805 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
19806 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
19807 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
19808 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
19809 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
19810 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
19811 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
19812 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
19813 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19814 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19815 { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
19816 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
19817 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
19818 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
19819 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
19820 };
19821 #endif
19822
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   Values are the 4-bit ARM condition field for each mnemonic suffix.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},		/* "hs" is a synonym for "cs".  */
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},/* "ul" and "lo" are synonyms for "cc".  */
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}				/* "al" (always) is the default.  */
};
19842
/* Define one barrier option in both lower- and upper-case spellings,
   with the same encoding CODE, gated on architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Barrier option names and their encodings.  The load-only variants
   ("ld", "ishld", "nshld", "oshld") are gated on ARM_EXT_V8; all
   other options only require the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19868
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19893
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but tagged OT_cinfix3_deprecated -- presumably so the
   infixed spelling can be diagnosed as deprecated (TODO confirm
   where the tag is consumed).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* Numeric-Thumb-opcode and T_MNEM-enumerator variants of the two
   infix macros above, mirroring TCE/tCE.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* As CE, but the mnemonic is given as a bare token and stringified
   here, and the entry takes a conditional infix after the third
   character rather than a suffix.  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb encoding is the ARM encoding with the 0xe condition nibble
   prepended (0xe##op).  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One conditionally-infixed entry: the full mnemonic is the string
   concatenation m1 <stringified m2> m3, where m2 is the (possibly
   empty) condition token.  sizeof (#m2) == 1 means #m2 is just the
   terminating NUL, i.e. no condition; otherwise the infix position
   is derived from the length of m1.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one mnemonic into the bare spelling plus every
   condition-infixed variant.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional entries; UF bears 0xF in the condition
   field, UE bears 0xE.  The mnemonic is stringified here.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Used as the "ae"/"te" argument of entries with no encoding
   function: do_##ae then expands to do_0, i.e. a null handler.  */
#define do_0 0
20064
20065 static const struct asm_opcode insns[] =
20066 {
20067 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20068 #define THUMB_VARIANT & arm_ext_v4t
20069 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
20070 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
20071 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
20072 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
20073 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
20074 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
20075 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
20076 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
20077 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
20078 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
20079 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
20080 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
20081 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
20082 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
20083 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
20084 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
20085
20086 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20087 for setting PSR flag bits. They are obsolete in V6 and do not
20088 have Thumb equivalents. */
20089 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20090 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20091 CL("tstp", 110f000, 2, (RR, SH), cmp),
20092 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20093 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20094 CL("cmpp", 150f000, 2, (RR, SH), cmp),
20095 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20096 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20097 CL("cmnp", 170f000, 2, (RR, SH), cmp),
20098
20099 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
20100 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
20101 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
20102 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
20103
20104 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
20105 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20106 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
20107 OP_RRnpc),
20108 OP_ADDRGLDR),ldst, t_ldst),
20109 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20110
20111 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20112 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20113 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20114 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20115 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20116 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20117
20118 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
20119 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
20120
20121 /* Pseudo ops. */
20122 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
20123 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
20124 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
20125 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
20126
20127 /* Thumb-compatibility pseudo ops. */
20128 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
20129 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
20130 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
20131 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
20132 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
20133 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
20134 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
20135 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
20136 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
20137 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
20138 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
20139 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
20140
20141 /* These may simplify to neg. */
20142 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
20143 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
20144
20145 #undef THUMB_VARIANT
20146 #define THUMB_VARIANT & arm_ext_os
20147
20148 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
20149 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
20150
20151 #undef THUMB_VARIANT
20152 #define THUMB_VARIANT & arm_ext_v6
20153
20154 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
20155
20156 /* V1 instructions with no Thumb analogue prior to V6T2. */
20157 #undef THUMB_VARIANT
20158 #define THUMB_VARIANT & arm_ext_v6t2
20159
20160 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20161 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20162 CL("teqp", 130f000, 2, (RR, SH), cmp),
20163
20164 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20165 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20166 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
20167 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20168
20169 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20170 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20171
20172 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20173 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20174
20175 /* V1 instructions with no Thumb analogue at all. */
20176 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
20177 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
20178
20179 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
20180 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
20181 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
20182 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
20183 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
20184 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
20185 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
20186 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
20187
20188 #undef ARM_VARIANT
20189 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20190 #undef THUMB_VARIANT
20191 #define THUMB_VARIANT & arm_ext_v4t
20192
20193 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20194 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20195
20196 #undef THUMB_VARIANT
20197 #define THUMB_VARIANT & arm_ext_v6t2
20198
20199 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20200 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
20201
20202 /* Generic coprocessor instructions. */
20203 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20204 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20205 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20206 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20207 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20208 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20209 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
20210
20211 #undef ARM_VARIANT
20212 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20213
20214 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20215 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20216
20217 #undef ARM_VARIANT
20218 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20219 #undef THUMB_VARIANT
20220 #define THUMB_VARIANT & arm_ext_msr
20221
20222 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
20223 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
20224
20225 #undef ARM_VARIANT
20226 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20227 #undef THUMB_VARIANT
20228 #define THUMB_VARIANT & arm_ext_v6t2
20229
20230 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20231 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20232 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20233 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20234 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20235 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20236 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20237 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20238
20239 #undef ARM_VARIANT
20240 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20241 #undef THUMB_VARIANT
20242 #define THUMB_VARIANT & arm_ext_v4t
20243
20244 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20245 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20246 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20247 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20248 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20249 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20250
20251 #undef ARM_VARIANT
20252 #define ARM_VARIANT & arm_ext_v4t_5
20253
20254 /* ARM Architecture 4T. */
20255 /* Note: bx (and blx) are required on V5, even if the processor does
20256 not support Thumb. */
20257 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
20258
20259 #undef ARM_VARIANT
20260 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20261 #undef THUMB_VARIANT
20262 #define THUMB_VARIANT & arm_ext_v5t
20263
20264 /* Note: blx has 2 variants; the .value coded here is for
20265 BLX(2). Only this variant has conditional execution. */
20266 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
20267 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
20268
20269 #undef THUMB_VARIANT
20270 #define THUMB_VARIANT & arm_ext_v6t2
20271
20272 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
20273 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20274 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20275 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20276 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20277 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20278 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20279 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20280
20281 #undef ARM_VARIANT
20282 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20283 #undef THUMB_VARIANT
20284 #define THUMB_VARIANT & arm_ext_v5exp
20285
20286 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20287 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20288 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20289 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20290
20291 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20292 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20293
20294 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20295 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20296 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20297 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20298
20299 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20300 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20301 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20302 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20303
20304 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20305 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20306
20307 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20308 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20309 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20310 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20311
20312 #undef ARM_VARIANT
20313 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20314 #undef THUMB_VARIANT
20315 #define THUMB_VARIANT & arm_ext_v6t2
20316
20317 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
20318 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
20319 ldrd, t_ldstd),
20320 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
20321 ADDRGLDRS), ldrd, t_ldstd),
20322
20323 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20324 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20325
20326 #undef ARM_VARIANT
20327 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20328
20329 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
20330
20331 #undef ARM_VARIANT
20332 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20333 #undef THUMB_VARIANT
20334 #define THUMB_VARIANT & arm_ext_v6
20335
20336 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
20337 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
20338 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20339 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20340 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20341 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20342 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20343 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20344 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20345 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
20346
20347 #undef THUMB_VARIANT
20348 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20349
20350 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
20351 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20352 strex, t_strex),
20353 #undef THUMB_VARIANT
20354 #define THUMB_VARIANT & arm_ext_v6t2
20355
20356 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20357 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20358
20359 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
20360 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
20361
20362 /* ARM V6 not included in V7M. */
20363 #undef THUMB_VARIANT
20364 #define THUMB_VARIANT & arm_ext_v6_notm
20365 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20366 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20367 UF(rfeib, 9900a00, 1, (RRw), rfe),
20368 UF(rfeda, 8100a00, 1, (RRw), rfe),
20369 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20370 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20371 UF(rfefa, 8100a00, 1, (RRw), rfe),
20372 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20373 UF(rfeed, 9900a00, 1, (RRw), rfe),
20374 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20375 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20376 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20377 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
20378 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
20379 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
20380 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
20381 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20382 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20383 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
20384
20385 /* ARM V6 not included in V7M (eg. integer SIMD). */
20386 #undef THUMB_VARIANT
20387 #define THUMB_VARIANT & arm_ext_v6_dsp
20388 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
20389 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
20390 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20391 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20392 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20393 /* Old name for QASX. */
20394 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20395 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20396 /* Old name for QSAX. */
20397 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20398 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20399 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20400 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20401 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20402 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20403 /* Old name for SASX. */
20404 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20405 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20406 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20407 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20408 /* Old name for SHASX. */
20409 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20410 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20411 /* Old name for SHSAX. */
20412 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20413 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20414 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20415 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20416 /* Old name for SSAX. */
20417 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20418 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20419 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20420 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20421 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20422 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20423 /* Old name for UASX. */
20424 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20425 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20426 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20427 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20428 /* Old name for UHASX. */
20429 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20430 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20431 /* Old name for UHSAX. */
20432 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20433 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20434 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20435 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20436 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20437 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20438 /* Old name for UQASX. */
20439 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20440 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20441 /* Old name for UQSAX. */
20442 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20443 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20444 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20445 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20446 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20447 /* Old name for USAX. */
20448 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20449 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20450 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20451 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20452 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20453 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20454 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20455 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20456 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20457 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20458 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20459 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20460 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20461 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20462 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20463 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20464 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20465 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20466 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20467 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20468 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20469 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20470 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20471 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20472 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20473 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20474 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20475 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20476 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20477 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
20478 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
20479 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20480 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20481 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
20482
20483 #undef ARM_VARIANT
20484 #define ARM_VARIANT & arm_ext_v6k_v6t2
20485 #undef THUMB_VARIANT
20486 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20487
20488 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
20489 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
20490 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20491 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20492
20493 #undef THUMB_VARIANT
20494 #define THUMB_VARIANT & arm_ext_v6_notm
20495 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20496 ldrexd, t_ldrexd),
20497 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20498 RRnpcb), strexd, t_strexd),
20499
20500 #undef THUMB_VARIANT
20501 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20502 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20503 rd_rn, rd_rn),
20504 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20505 rd_rn, rd_rn),
20506 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20507 strex, t_strexbh),
20508 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20509 strex, t_strexbh),
20510 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20511
20512 #undef ARM_VARIANT
20513 #define ARM_VARIANT & arm_ext_sec
20514 #undef THUMB_VARIANT
20515 #define THUMB_VARIANT & arm_ext_sec
20516
20517 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20518
20519 #undef ARM_VARIANT
20520 #define ARM_VARIANT & arm_ext_virt
20521 #undef THUMB_VARIANT
20522 #define THUMB_VARIANT & arm_ext_virt
20523
20524 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20525 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20526
20527 #undef ARM_VARIANT
20528 #define ARM_VARIANT & arm_ext_pan
20529 #undef THUMB_VARIANT
20530 #define THUMB_VARIANT & arm_ext_pan
20531
20532 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20533
20534 #undef ARM_VARIANT
20535 #define ARM_VARIANT & arm_ext_v6t2
20536 #undef THUMB_VARIANT
20537 #define THUMB_VARIANT & arm_ext_v6t2
20538
20539 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20540 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20541 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20542 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20543
20544 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20545 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20546
20547 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20548 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20549 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20550 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20551
20552 #undef ARM_VARIANT
20553 #define ARM_VARIANT & arm_ext_v3
20554 #undef THUMB_VARIANT
20555 #define THUMB_VARIANT & arm_ext_v6t2
20556
20557 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20558 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20559 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20560
20561 #undef ARM_VARIANT
20562 #define ARM_VARIANT & arm_ext_v6t2
20563 #undef THUMB_VARIANT
20564 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20565 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20566 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20567
20568 /* Thumb-only instructions. */
20569 #undef ARM_VARIANT
20570 #define ARM_VARIANT NULL
20571 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20572 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20573
20574 /* ARM does not really have an IT instruction, so always allow it.
20575 The opcode is copied from Thumb in order to allow warnings in
20576 -mimplicit-it=[never | arm] modes. */
20577 #undef ARM_VARIANT
20578 #define ARM_VARIANT & arm_ext_v1
20579 #undef THUMB_VARIANT
20580 #define THUMB_VARIANT & arm_ext_v6t2
20581
20582 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20583 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20584 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20585 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20586 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20587 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20588 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20589 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20590 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20591 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20592 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20593 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20594 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20595 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20596 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20597 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20598 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20599 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20600
20601 /* Thumb2 only instructions. */
20602 #undef ARM_VARIANT
20603 #define ARM_VARIANT NULL
20604
20605 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20606 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20607 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20608 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20609 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20610 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20611
20612 /* Hardware division instructions. */
20613 #undef ARM_VARIANT
20614 #define ARM_VARIANT & arm_ext_adiv
20615 #undef THUMB_VARIANT
20616 #define THUMB_VARIANT & arm_ext_div
20617
20618 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20619 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20620
20621 /* ARM V6M/V7 instructions. */
20622 #undef ARM_VARIANT
20623 #define ARM_VARIANT & arm_ext_barrier
20624 #undef THUMB_VARIANT
20625 #define THUMB_VARIANT & arm_ext_barrier
20626
20627 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20628 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20629 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20630
20631 /* ARM V7 instructions. */
20632 #undef ARM_VARIANT
20633 #define ARM_VARIANT & arm_ext_v7
20634 #undef THUMB_VARIANT
20635 #define THUMB_VARIANT & arm_ext_v7
20636
20637 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20638 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20639
20640 #undef ARM_VARIANT
20641 #define ARM_VARIANT & arm_ext_mp
20642 #undef THUMB_VARIANT
20643 #define THUMB_VARIANT & arm_ext_mp
20644
20645 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20646
20647 /* ARMv8 instructions.  */
20648 #undef ARM_VARIANT
20649 #define ARM_VARIANT & arm_ext_v8
20650
20651 /* Instructions shared between armv8-a and armv8-m. */
20652 #undef THUMB_VARIANT
20653 #define THUMB_VARIANT & arm_ext_atomics
20654
20655 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20656 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20657 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20658 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20659 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20660 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20661 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20662 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20663 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20664 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20665 stlex, t_stlex),
20666 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20667 stlex, t_stlex),
20668 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20669 stlex, t_stlex),
20670 #undef THUMB_VARIANT
20671 #define THUMB_VARIANT & arm_ext_v8
20672
20673 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20674 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20675 ldrexd, t_ldrexd),
20676 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20677 strexd, t_strexd),
20678
20679 /* Defined in V8 but is in undefined encoding space for earlier
20680 architectures. However earlier architectures are required to treat
20681    this instruction as a semihosting trap as well.  Hence while not explicitly
20682 defined as such, it is in fact correct to define the instruction for all
20683 architectures. */
20684 #undef THUMB_VARIANT
20685 #define THUMB_VARIANT & arm_ext_v1
20686 #undef ARM_VARIANT
20687 #define ARM_VARIANT & arm_ext_v1
20688 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20689
20690 /* ARMv8 T32 only. */
20691 #undef ARM_VARIANT
20692 #define ARM_VARIANT NULL
20693 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20694 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20695 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20696
20697 /* FP for ARMv8. */
20698 #undef ARM_VARIANT
20699 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20700 #undef THUMB_VARIANT
20701 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20702
20703 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20704 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20705 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20706 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20707 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20708 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20709 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20710 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20711 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20712 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20713 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20714 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20715 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20716 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20717 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20718 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20719 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20720
20721 /* Crypto v1 extensions. */
20722 #undef ARM_VARIANT
20723 #define ARM_VARIANT & fpu_crypto_ext_armv8
20724 #undef THUMB_VARIANT
20725 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20726
20727 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20728 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20729 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20730 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20731 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20732 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20733 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20734 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20735 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20736 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20737 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20738 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20739 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20740 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20741
20742 #undef ARM_VARIANT
20743 #define ARM_VARIANT & crc_ext_armv8
20744 #undef THUMB_VARIANT
20745 #define THUMB_VARIANT & crc_ext_armv8
20746 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20747 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20748 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20749 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20750 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20751 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20752
20753 /* ARMv8.2 RAS extension. */
20754 #undef ARM_VARIANT
20755 #define ARM_VARIANT & arm_ext_ras
20756 #undef THUMB_VARIANT
20757 #define THUMB_VARIANT & arm_ext_ras
20758 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20759
20760 #undef ARM_VARIANT
20761 #define ARM_VARIANT & arm_ext_v8_3
20762 #undef THUMB_VARIANT
20763 #define THUMB_VARIANT & arm_ext_v8_3
20764 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20765 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20766 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20767
20768 #undef ARM_VARIANT
20769 #define ARM_VARIANT & fpu_neon_ext_dotprod
20770 #undef THUMB_VARIANT
20771 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20772 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20773 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20774
20775 #undef ARM_VARIANT
20776 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20777 #undef THUMB_VARIANT
20778 #define THUMB_VARIANT NULL
20779
20780 cCE("wfs", e200110, 1, (RR), rd),
20781 cCE("rfs", e300110, 1, (RR), rd),
20782 cCE("wfc", e400110, 1, (RR), rd),
20783 cCE("rfc", e500110, 1, (RR), rd),
20784
20785 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20786 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20787 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20788 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20789
20790 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20791 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20792 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20793 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20794
20795 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20796 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20797 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20798 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20799 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20800 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20801 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20802 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20803 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20804 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20805 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20806 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20807
20808 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20809 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20810 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20811 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20812 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20813 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20814 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20815 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20816 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20817 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20818 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20819 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20820
20821 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20822 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20823 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20824 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20825 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20826 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20827 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20828 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20829 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20830 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20831 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20832 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20833
20834 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20835 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20836 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20837 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20838 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20839 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20840 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20841 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20842 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20843 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20844 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20845 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20846
20847 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20848 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20849 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20850 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20851 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20852 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20853 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20854 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20855 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20856 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20857 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20858 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20859
20860 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20861 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20862 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20863 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20864 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20865 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20866 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20867 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20868 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20869 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20870 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20871 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20872
20873 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20874 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20875 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20876 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20877 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20878 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20879 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20880 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20881 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20882 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20883 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20884 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20885
20886 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20887 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20888 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20889 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20890 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20891 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20892 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20893 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20894 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20895 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20896 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20897  cCL("expez",	e788160, 2, (RF, RF_IF),	    rd_rm), /* was "expdz": duplicate of the entry at e7081e0; this e-precision slot (e7881x0) follows expe/expep/expem and takes the Z rounding suffix. */
20898
20899 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20900 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20901 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20902 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20903 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20904 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20905 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20906 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20907 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20908 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20909 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20910 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20911
20912 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20913 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20914 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20915 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20916 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20917 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20918 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20919 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20920 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20921 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20922 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20923 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20924
20925 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20926 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20927 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20928 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20929 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20930 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20931 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20932 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20933 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20934 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20935 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20936 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20937
20938 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20939 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20940 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20941 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20942 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20943 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20944 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20945 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20946 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20947 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20948 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20949 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20950
20951 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20952 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20953 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20954 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20955 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20956 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20957 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20958 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20959 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20960 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20961 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20962 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20963
20964 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20965 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20966 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20967 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20968 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20969 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20970 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20971 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20972 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20973 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20974 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20975 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20976
20977 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20978 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20979 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20980 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20981 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20982 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20983 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20984 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20985 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20986 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20987 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20988 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20989
20990 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20991 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20992 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20993 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20994 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20995 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20996 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20997 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20998 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20999 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
21000 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
21001 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
21002
21003 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
21004 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
21005 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
21006 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
21007 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
21008 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21009 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21010 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21011 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
21012 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
21013 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
21014 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
21015
21016 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
21017 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
21018 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
21019 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
21020 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
21021 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21022 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21023 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21024 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
21025 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
21026 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
21027 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
21028
21029 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
21030 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
21031 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
21032 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
21033 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
21034 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21035 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21036 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21037 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
21038 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
21039 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
21040 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
21041
21042 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
21043 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
21044 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
21045 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
21046 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
21047 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21048 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21049 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21050 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
21051 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
21052 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
21053 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
21054
21055 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
21056 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
21057 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
21058 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
21059 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
21060 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21061 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21062 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21063 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
21064 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
21065 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
21066 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
21067
21068 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
21069 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
21070 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
21071 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
21072 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
21073 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21074 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21075 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21076 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
21077 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
21078 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
21079 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
21080
21081 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
21082 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
21083 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
21084 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
21085 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
21086 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21087 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21088 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21089 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
21090 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
21091 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
21092 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
21093
21094 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
21095 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
21096 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
21097 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
21098 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
21099 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21100 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21101 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21102 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
21103 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
21104 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
21105 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
21106
21107 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
21108 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
21109 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
21110 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
21111 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
21112 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21113 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21114 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21115 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
21116 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
21117 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
21118 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
21119
21120 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
21121 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
21122 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
21123 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
21124 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
21125 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21126 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21127 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21128 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
21129 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
21130 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
21131 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
21132
21133 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21134 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21135 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21136 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21137 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21138 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21139 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21140 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21141 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21142 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21143 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21144 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21145
21146 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21147 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21148 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21149 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21150 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21151 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21152 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21153 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21154 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21155 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21156 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21157 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21158
21159 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21160 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21161 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21162 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21163 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21164 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21165 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21166 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21167 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21168 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21169 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21170 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21171
21172 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
21173 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
21174 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
21175 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
21176
21177 cCL("flts", e000110, 2, (RF, RR), rn_rd),
21178 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
21179 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
21180 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
21181 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
21182 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
21183 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
21184 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
21185 cCL("flte", e080110, 2, (RF, RR), rn_rd),
21186 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
21187 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
21188 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
21189
21190 /* The implementation of the FIX instruction is broken on some
21191 assemblers, in that it accepts a precision specifier as well as a
21192 rounding specifier, despite the fact that this is meaningless.
21193 To be more compatible, we accept it as well, though of course it
21194 does not set any bits. */
21195 cCE("fix", e100110, 2, (RR, RF), rd_rm),
21196 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
21197 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
21198 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
21199 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
21200 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
21201 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
21202 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
21203 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
21204 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
21205 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
21206 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
21207 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
21208
21209 /* Instructions that were new with the real FPA, call them V2. */
21210 #undef ARM_VARIANT
21211 #define ARM_VARIANT & fpu_fpa_ext_v2
21212
21213 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21214 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21215 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21216 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21217 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21218 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21219
21220 #undef ARM_VARIANT
21221 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21222
21223 /* Moves and type conversions. */
21224 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
21225 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
21226 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
21227 cCE("fmstat", ef1fa10, 0, (), noargs),
21228 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
21229 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
21230 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
21231 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
21232 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
21233 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21234 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
21235 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21236 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
21237 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
21238
21239 /* Memory operations. */
21240 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21241 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21242 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21243 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21244 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21245 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21246 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21247 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21248 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21249 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21250 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21251 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21252 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21253 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21254 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21255 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21256 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21257 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21258
21259 /* Monadic operations. */
21260 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
21261 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
21262 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
21263
21264 /* Dyadic operations. */
21265 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21266 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21267 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21268 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21269 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21270 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21271 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21272 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21273 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21274
21275 /* Comparisons. */
21276 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
21277 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
21278 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
21279 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
21280
21281 /* Double precision load/store are still present on single precision
21282 implementations. */
21283 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21284 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21285 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21286 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21287 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21288 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21289 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21290 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21291 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21292 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21293
21294 #undef ARM_VARIANT
21295 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21296
21297 /* Moves and type conversions. */
21298 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21299 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21300 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21301 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
21302 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
21303 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
21304 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
21305 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21306 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
21307 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21308 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21309 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21310 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21311
21312 /* Monadic operations. */
21313 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21314 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21315 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21316
21317 /* Dyadic operations. */
21318 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21319 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21320 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21321 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21322 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21323 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21324 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21325 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21326 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21327
21328 /* Comparisons. */
21329 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21330 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
21331 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21332 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
21333
21334 #undef ARM_VARIANT
21335 #define ARM_VARIANT & fpu_vfp_ext_v2
21336
21337 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
21338 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
21339 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
21340 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
21341
21342 /* Instructions which may belong to either the Neon or VFP instruction sets.
21343 Individual encoder functions perform additional architecture checks. */
21344 #undef ARM_VARIANT
21345 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21346 #undef THUMB_VARIANT
21347 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21348
21349 /* These mnemonics are unique to VFP. */
21350 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
21351 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
21352 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21353 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21354 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21355 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21356 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21357 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
21358 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
21359 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
21360
21361 /* Mnemonics shared by Neon and VFP. */
21362 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
21363 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21364 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21365
21366 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21367 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21368
21369 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21370 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21371
21372 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21373 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21374 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21375 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21376 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21377 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21378
21379 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
21380 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
21381 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
21382 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
21383
21384
21385 /* NOTE: All VMOV encoding is special-cased! */
21386 NCE(vmov, 0, 1, (VMOV), neon_mov),
21387 NCE(vmovq, 0, 1, (VMOV), neon_mov),
21388
21389 #undef THUMB_VARIANT
21390 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
21391 by different feature bits. Since we are setting the Thumb guard, we can
21392 require Thumb-1 which makes it a nop guard and set the right feature bit in
21393 do_vldr_vstr (). */
21394 #define THUMB_VARIANT & arm_ext_v4t
21395 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
21396 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
21397
21398 #undef ARM_VARIANT
21399 #define ARM_VARIANT & arm_ext_fp16
21400 #undef THUMB_VARIANT
21401 #define THUMB_VARIANT & arm_ext_fp16
21402 /* New instructions added from v8.2, allowing the extraction and insertion of
21403 the upper 16 bits of a 32-bit vector register. */
21404 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
21405 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
21406
21407 /* New backported fma/fms instructions optional in v8.2. */
21408 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
21409 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
21410
21411 #undef THUMB_VARIANT
21412 #define THUMB_VARIANT & fpu_neon_ext_v1
21413 #undef ARM_VARIANT
21414 #define ARM_VARIANT & fpu_neon_ext_v1
21415
21416 /* Data processing with three registers of the same length. */
21417 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21418 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
21419 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
21420 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21421 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21422 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21423 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21424 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21425 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21426 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21427 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21428 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21429 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21430 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21431 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21432 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21433 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21434 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21435 /* If not immediate, fall back to neon_dyadic_i64_su.
21436 shl_imm should accept I8 I16 I32 I64,
21437 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21438 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
21439 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
21440 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
21441 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
21442 /* Logic ops, types optional & ignored. */
21443 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21444 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21445 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21446 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21447 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21448 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21449 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21450 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21451 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
21452 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
21453 /* Bitfield ops, untyped. */
21454 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21455 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21456 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21457 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21458 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21459 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21460 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21461 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21462 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21463 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21464 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21465 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21466 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21467 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21468 back to neon_dyadic_if_su. */
21469 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21470 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21471 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21472 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21473 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21474 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21475 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21476 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21477 /* Comparison. Type I8 I16 I32 F32. */
21478 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
21479 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
21480 /* As above, D registers only. */
21481 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21482 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21483 /* Int and float variants, signedness unimportant. */
21484 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21485 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21486 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
21487 /* Add/sub take types I8 I16 I32 I64 F32. */
21488 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21489 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21490 /* vtst takes sizes 8, 16, 32. */
21491 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
21492 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
21493 /* VMUL takes I8 I16 I32 F32 P8. */
21494 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
21495 /* VQD{R}MULH takes S16 S32. */
21496 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21497 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21498 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21499 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21500 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21501 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21502 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21503 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21504 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21505 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21506 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21507 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21508 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21509 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21510 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21511 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21512 /* ARM v8.1 extension. */
21513 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21514 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21515 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21516 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21517
21518 /* Two address, int/float. Types S8 S16 S32 F32. */
21519 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21520 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21521
21522 /* Data processing with two registers and a shift amount. */
21523 /* Right shifts, and variants with rounding.
21524 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21525 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21526 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21527 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21528 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21529 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21530 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21531 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21532 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21533 /* Shift and insert. Sizes accepted 8 16 32 64. */
21534 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21535 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21536 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21537 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21538 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21539 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21540 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21541 /* Right shift immediate, saturating & narrowing, with rounding variants.
21542 Types accepted S16 S32 S64 U16 U32 U64. */
21543 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21544 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21545 /* As above, unsigned. Types accepted S16 S32 S64. */
21546 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21547 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21548 /* Right shift narrowing. Types accepted I16 I32 I64. */
21549 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21550 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21551 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21552 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21553 /* CVT with optional immediate for fixed-point variant. */
21554 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21555
21556 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21557 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21558
21559 /* Data processing, three registers of different lengths. */
21560 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21561 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21562 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21563 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21564 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21565 /* If not scalar, fall back to neon_dyadic_long.
21566 Vector types as above, scalar types S16 S32 U16 U32. */
21567 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21568 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21569 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21570 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21571 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21572 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21573 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21574 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21575 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21576 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21577 /* Saturating doubling multiplies. Types S16 S32. */
21578 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21579 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21580 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21581 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21582 S16 S32 U16 U32. */
21583 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21584
21585 /* Extract. Size 8. */
21586 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21587 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21588
21589 /* Two registers, miscellaneous. */
21590 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21591 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21592 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21593 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21594 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21595 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21596 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21597 /* Vector replicate. Sizes 8 16 32. */
21598 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21599 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21600 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21601 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21602 /* VMOVN. Types I16 I32 I64. */
21603 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21604 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21605 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21606 /* VQMOVUN. Types S16 S32 S64. */
21607 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21608 /* VZIP / VUZP. Sizes 8 16 32. */
21609 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21610 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21611 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21612 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21613 /* VQABS / VQNEG. Types S8 S16 S32. */
21614 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21615 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21616 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21617 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21618 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21619 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21620 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21621 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21622 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21623 /* Reciprocal estimates. Types U32 F16 F32. */
21624 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21625 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21626 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21627 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21628 /* VCLS. Types S8 S16 S32. */
21629 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21630 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21631 /* VCLZ. Types I8 I16 I32. */
21632 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21633 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21634 /* VCNT. Size 8. */
21635 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21636 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21637 /* Two address, untyped. */
21638 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21639 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21640 /* VTRN. Sizes 8 16 32. */
21641 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21642 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21643
21644 /* Table lookup. Size 8. */
21645 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21646 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21647
21648 #undef THUMB_VARIANT
21649 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21650 #undef ARM_VARIANT
21651 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21652
21653 /* Neon element/structure load/store. */
21654 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21655 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21656 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21657 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21658 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21659 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21660 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21661 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21662
21663 #undef THUMB_VARIANT
21664 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21665 #undef ARM_VARIANT
21666 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21667 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21668 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21669 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21670 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21671 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21672 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21673 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21674 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21675 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21676
21677 #undef THUMB_VARIANT
21678 #define THUMB_VARIANT & fpu_vfp_ext_v3
21679 #undef ARM_VARIANT
21680 #define ARM_VARIANT & fpu_vfp_ext_v3
21681
21682 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21683 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21684 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21685 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21686 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21687 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21688 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21689 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21690 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21691
21692 #undef ARM_VARIANT
21693 #define ARM_VARIANT & fpu_vfp_ext_fma
21694 #undef THUMB_VARIANT
21695 #define THUMB_VARIANT & fpu_vfp_ext_fma
21696 /* Mnemonics shared by Neon and VFP. These are included in the
21697 VFP FMA variant; NEON and VFP FMA always includes the NEON
21698 FMA instructions. */
21699 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21700 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21701 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21702 the v form should always be used. */
21703 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21704 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21705 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21706 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21707 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21708 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21709
21710 #undef THUMB_VARIANT
21711 #undef ARM_VARIANT
21712 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21713
21714 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21715 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21716 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21717 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21718 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21719 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21720 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21721 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21722
21723 #undef ARM_VARIANT
21724 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21725
21726 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21727 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21728 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21729 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21730 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21731 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21732 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21733 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21734 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21735 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21736 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21737 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21738 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21739 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21740 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21741 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21742 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21743 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21744 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21745 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21746 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21747 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21748 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21749 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21750 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21751 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21752 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21753 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21754 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21755 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21756 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21757 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21758 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21759 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21760 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21761 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21762 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21763 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21764 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21765 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21766 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21767 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21768 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21769 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21770 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21771 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21772 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21773 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21774 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21775 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21776 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21777 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21778 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21779 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21780 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21781 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21782 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21783 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21784 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21785 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21786 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21787 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21788 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21789 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21790 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21791 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21792 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21793 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21794 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21795 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21796 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21797 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21798 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21799 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21800 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21801 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21802 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21803 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21804 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21805 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21806 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21807 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21808 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21809 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21810 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21811 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21812 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21813 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21814 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21815 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21816 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21817 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21818 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21819 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21820 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21821 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21822 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21823 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21824 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21825 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21826 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21827 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21828 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21829 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21830 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21831 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21832 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21833 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21834 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21835 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21836 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21837 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21838 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21839 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21840 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21841 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21842 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21843 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21844 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21845 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21846 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21847 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21848 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21849 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21850 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21851 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21852 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21853 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21854 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21855 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21856 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21857 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21858 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21859 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21860 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21861 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21862 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21863 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21864 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21865 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21866 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21867 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21868 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21869 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21870 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21871 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21872 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21873 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21874 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21875 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21876 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21877 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21878 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21879 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21880 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21881 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21882 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21883 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21884 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21885 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21886 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21887 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21888
21889 #undef ARM_VARIANT
21890 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21891
21892 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21893 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21894 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21895 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21896 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21897 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21898 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21899 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21900 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21901 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21902 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21903 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21904 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21905 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21906 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21907 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21908 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21909 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21910 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21911 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21912 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21913 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21914 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21915 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21916 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21917 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21918 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21919 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21920 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21921 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21922 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21923 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21924 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21925 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21926 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21927 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21928 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21929 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21930 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21931 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21932 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21933 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21934 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21935 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21936 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21937 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21938 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21939 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21940 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21941 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21942 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21943 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21944 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21945 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21946 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21947 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21948 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21949
21950 #undef ARM_VARIANT
21951 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21952
21953 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21954 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21955 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21956 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21957 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21958 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21959 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21960 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21961 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21962 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21963 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21964 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21965 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21966 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21967 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21968 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21969 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21970 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21971 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21972 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21973 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21974 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21975 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21976 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21977 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21978 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21979 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21980 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21981 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21982 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21983 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21984 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21985 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21986 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21987 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21988 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21989 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21990 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21991 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21992 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21993 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21994 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21995 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21996 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21997 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21998 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21999 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
22000 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
22001 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
22002 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
22003 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
22004 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
22005 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
22006 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
22007 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
22008 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
22009 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
22010 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
22011 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
22012 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
22013 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
22014 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
22015 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
22016 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
22017 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22018 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22019 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22020 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22021 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22022 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22023 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22024 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22025 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22026 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22027 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22028 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22029
22030 /* ARMv8.5-A instructions. */
22031 #undef ARM_VARIANT
22032 #define ARM_VARIANT & arm_ext_sb
22033 #undef THUMB_VARIANT
22034 #define THUMB_VARIANT & arm_ext_sb
22035 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
22036
22037 #undef ARM_VARIANT
22038 #define ARM_VARIANT & arm_ext_predres
22039 #undef THUMB_VARIANT
22040 #define THUMB_VARIANT & arm_ext_predres
22041 CE("cfprctx", e070f93, 1, (RRnpc), rd),
22042 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
22043 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
22044
22045 /* ARMv8-M instructions. */
22046 #undef ARM_VARIANT
22047 #define ARM_VARIANT NULL
22048 #undef THUMB_VARIANT
22049 #define THUMB_VARIANT & arm_ext_v8m
22050 ToU("sg", e97fe97f, 0, (), noargs),
22051 ToC("blxns", 4784, 1, (RRnpc), t_blx),
22052 ToC("bxns", 4704, 1, (RRnpc), t_bx),
22053 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
22054 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
22055 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
22056 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
22057
22058 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22059 instructions behave as nop if no VFP is present. */
22060 #undef THUMB_VARIANT
22061 #define THUMB_VARIANT & arm_ext_v8m_main
22062 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
22063 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
22064
22065 /* Armv8.1-M Mainline instructions. */
22066 #undef THUMB_VARIANT
22067 #define THUMB_VARIANT & arm_ext_v8_1m_main
22068 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
22069 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
22070 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
22071 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
22072 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
22073
22074 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
22075 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
22076 toU("le", _le, 2, (oLR, EXP), t_loloop),
22077
22078 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
22079 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm)
22080 };
22081 #undef ARM_VARIANT
22082 #undef THUMB_VARIANT
22083 #undef TCE
22084 #undef TUE
22085 #undef TUF
22086 #undef TCC
22087 #undef cCE
22088 #undef cCL
22089 #undef C3E
22090 #undef C3
22091 #undef CE
22092 #undef CM
22093 #undef CL
22094 #undef UE
22095 #undef UF
22096 #undef UT
22097 #undef NUF
22098 #undef nUF
22099 #undef NCE
22100 #undef nCE
22101 #undef OPS0
22102 #undef OPS1
22103 #undef OPS2
22104 #undef OPS3
22105 #undef OPS4
22106 #undef OPS5
22107 #undef OPS6
22108 #undef do_0
22109 #undef ToC
22110 #undef toC
22111 #undef ToU
22112 #undef toU
22113 \f
22114 /* MD interface: bits in the object file. */
22115
22116 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22117 for use in the a.out file, and stores them in the array pointed to by buf.
22118 This knows about the endian-ness of the target machine and does
22119 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22120 2 (short) and 4 (long) Floating numbers are put out as a series of
22121 LITTLENUMS (shorts, here at least). */
22122
22123 void
22124 md_number_to_chars (char * buf, valueT val, int n)
22125 {
22126 if (target_big_endian)
22127 number_to_chars_bigendian (buf, val, n);
22128 else
22129 number_to_chars_littleendian (buf, val, n);
22130 }
22131
22132 static valueT
22133 md_chars_to_number (char * buf, int n)
22134 {
22135 valueT result = 0;
22136 unsigned char * where = (unsigned char *) buf;
22137
22138 if (target_big_endian)
22139 {
22140 while (n--)
22141 {
22142 result <<= 8;
22143 result |= (*where++ & 255);
22144 }
22145 }
22146 else
22147 {
22148 while (n--)
22149 {
22150 result <<= 8;
22151 result |= (where[n] & 255);
22152 }
22153 }
22154
22155 return result;
22156 }
22157
22158 /* MD interface: Sections. */
22159
22160 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22161 that an rs_machine_dependent frag may reach. */
22162
22163 unsigned int
22164 arm_frag_max_var (fragS *fragp)
22165 {
22166 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22167 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22168
22169 Note that we generate relaxable instructions even for cases that don't
22170 really need it, like an immediate that's a trivial constant. So we're
22171 overestimating the instruction size for some of those cases. Rather
22172 than putting more intelligence here, it would probably be better to
22173 avoid generating a relaxation frag in the first place when it can be
22174 determined up front that a short instruction will suffice. */
22175
22176 gas_assert (fragp->fr_type == rs_machine_dependent);
22177 return INSN_SIZE;
22178 }
22179
22180 /* Estimate the size of a frag before relaxing. Assume everything fits in
22181 2 bytes. */
22182
22183 int
22184 md_estimate_size_before_relax (fragS * fragp,
22185 segT segtype ATTRIBUTE_UNUSED)
22186 {
22187 fragp->fr_var = 2;
22188 return 2;
22189 }
22190
/* Convert a machine dependent frag.  Called once relaxation has
   settled: FRAGP was originally laid out as a narrow (16-bit) Thumb
   instruction but may have been relaxed to the wide (32-bit) form
   (fr_var == 4).  Rewrite the encoding in place as necessary and
   emit a fixup for the operand.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the narrow opcode originally emitted; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcode groups 4 and 9 are the SP/PC-relative forms:
	     Rd lives in bits 8-10 of the 16-bit encoding.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Register offset forms: Rd in bits 0-2, Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* Select the immediate-offset wide addressing mode.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative pseudo-load is actually pc-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant Rd from bits 4-7 of the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form is encoded relative to Align(PC, 4) + 4.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry Rd in the low field; cmp/cmn carry Rn,
	     which sits 8 bits higher in the wide encoding.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Transplant the condition code from bits 8-11.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S flag; flag-setting forms use the add
	     immediate reloc.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
22364
22365 /* Return the size of a relaxable immediate operand instruction.
22366 SHIFT and SIZE specify the form of the allowable immediate. */
22367 static int
22368 relax_immediate (fragS *fragp, int size, int shift)
22369 {
22370 offsetT offset;
22371 offsetT mask;
22372 offsetT low;
22373
22374 /* ??? Should be able to do better than this. */
22375 if (fragp->fr_symbol)
22376 return 4;
22377
22378 low = (1 << shift) - 1;
22379 mask = (1 << (shift + size)) - (1 << shift);
22380 offset = fragp->fr_offset;
22381 /* Force misaligned offsets to 32-bit variant. */
22382 if (offset & low)
22383 return 4;
22384 if (offset & ~mask)
22385 return 4;
22386 return 2;
22387 }
22388
/* Get the address of a symbol during relaxation.  STRETCH is the
   cumulative size adjustment applied so far in the current
   relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An alignment frag absorbs stretch up to its alignment
		 boundary; round the stretch towards zero accordingly.
		 NOTE(review): 1 << fr_offset is an int shift —
		 presumably fr_offset is always < 31 here; confirm.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once the stretch is fully absorbed no later frag can
		 be affected by it.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means the symbol's frag precedes FRAGP, so it has
	 already moved and needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
22438
22439 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22440 load. */
22441 static int
22442 relax_adr (fragS *fragp, asection *sec, long stretch)
22443 {
22444 addressT addr;
22445 offsetT val;
22446
22447 /* Assume worst case for symbols not known to be in the same section. */
22448 if (fragp->fr_symbol == NULL
22449 || !S_IS_DEFINED (fragp->fr_symbol)
22450 || sec != S_GET_SEGMENT (fragp->fr_symbol)
22451 || S_IS_WEAK (fragp->fr_symbol))
22452 return 4;
22453
22454 val = relaxed_symbol_addr (fragp, stretch);
22455 addr = fragp->fr_address + fragp->fr_fix;
22456 addr = (addr + 4) & ~3;
22457 /* Force misaligned targets to 32-bit variant. */
22458 if (val & 3)
22459 return 4;
22460 val -= addr;
22461 if (val < 0 || val > 1020)
22462 return 4;
22463 return 2;
22464 }
22465
22466 /* Return the size of a relaxable add/sub immediate instruction. */
22467 static int
22468 relax_addsub (fragS *fragp, asection *sec)
22469 {
22470 char *buf;
22471 int op;
22472
22473 buf = fragp->fr_literal + fragp->fr_fix;
22474 op = bfd_get_16(sec->owner, buf);
22475 if ((op & 0xf) == ((op >> 4) & 0xf))
22476 return relax_immediate (fragp, 8, 0);
22477 else
22478 return relax_immediate (fragp, 3, 0);
22479 }
22480
22481 /* Return TRUE iff the definition of symbol S could be pre-empted
22482 (overridden) at link or load time. */
22483 static bfd_boolean
22484 symbol_preemptible (symbolS *s)
22485 {
22486 /* Weak symbols can always be pre-empted. */
22487 if (S_IS_WEAK (s))
22488 return TRUE;
22489
22490 /* Non-global symbols cannot be pre-empted. */
22491 if (! S_IS_EXTERNAL (s))
22492 return FALSE;
22493
22494 #ifdef OBJ_ELF
22495 /* In ELF, a global symbol can be marked protected, or private. In that
22496 case it can't be pre-empted (other definitions in the same link unit
22497 would violate the ODR). */
22498 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
22499 return FALSE;
22500 #endif
22501
22502 /* Other global symbols might be pre-empted. */
22503 return TRUE;
22504 }
22505
22506 /* Return the size of a relaxable branch instruction. BITS is the
22507 size of the offset field in the narrow instruction. */
22508
22509 static int
22510 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
22511 {
22512 addressT addr;
22513 offsetT val;
22514 offsetT limit;
22515
22516 /* Assume worst case for symbols not known to be in the same section. */
22517 if (!S_IS_DEFINED (fragp->fr_symbol)
22518 || sec != S_GET_SEGMENT (fragp->fr_symbol)
22519 || S_IS_WEAK (fragp->fr_symbol))
22520 return 4;
22521
22522 #ifdef OBJ_ELF
22523 /* A branch to a function in ARM state will require interworking. */
22524 if (S_IS_DEFINED (fragp->fr_symbol)
22525 && ARM_IS_FUNC (fragp->fr_symbol))
22526 return 4;
22527 #endif
22528
22529 if (symbol_preemptible (fragp->fr_symbol))
22530 return 4;
22531
22532 val = relaxed_symbol_addr (fragp, stretch);
22533 addr = fragp->fr_address + fragp->fr_fix + 4;
22534 val -= addr;
22535
22536 /* Offset is a signed value *2 */
22537 limit = 1 << bits;
22538 if (val >= limit || val < -limit)
22539 return 4;
22540 return 2;
22541 }
22542
22543
22544 /* Relax a machine dependent frag. This returns the amount by which
22545 the current size of the frag should change. */
22546
22547 int
22548 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
22549 {
22550 int oldsize;
22551 int newsize;
22552
22553 oldsize = fragp->fr_var;
22554 switch (fragp->fr_subtype)
22555 {
22556 case T_MNEM_ldr_pc2:
22557 newsize = relax_adr (fragp, sec, stretch);
22558 break;
22559 case T_MNEM_ldr_pc:
22560 case T_MNEM_ldr_sp:
22561 case T_MNEM_str_sp:
22562 newsize = relax_immediate (fragp, 8, 2);
22563 break;
22564 case T_MNEM_ldr:
22565 case T_MNEM_str:
22566 newsize = relax_immediate (fragp, 5, 2);
22567 break;
22568 case T_MNEM_ldrh:
22569 case T_MNEM_strh:
22570 newsize = relax_immediate (fragp, 5, 1);
22571 break;
22572 case T_MNEM_ldrb:
22573 case T_MNEM_strb:
22574 newsize = relax_immediate (fragp, 5, 0);
22575 break;
22576 case T_MNEM_adr:
22577 newsize = relax_adr (fragp, sec, stretch);
22578 break;
22579 case T_MNEM_mov:
22580 case T_MNEM_movs:
22581 case T_MNEM_cmp:
22582 case T_MNEM_cmn:
22583 newsize = relax_immediate (fragp, 8, 0);
22584 break;
22585 case T_MNEM_b:
22586 newsize = relax_branch (fragp, sec, 11, stretch);
22587 break;
22588 case T_MNEM_bcond:
22589 newsize = relax_branch (fragp, sec, 8, stretch);
22590 break;
22591 case T_MNEM_add_sp:
22592 case T_MNEM_add_pc:
22593 newsize = relax_immediate (fragp, 8, 2);
22594 break;
22595 case T_MNEM_inc_sp:
22596 case T_MNEM_dec_sp:
22597 newsize = relax_immediate (fragp, 7, 2);
22598 break;
22599 case T_MNEM_addi:
22600 case T_MNEM_addis:
22601 case T_MNEM_subi:
22602 case T_MNEM_subis:
22603 newsize = relax_addsub (fragp, sec);
22604 break;
22605 default:
22606 abort ();
22607 }
22608
22609 fragp->fr_var = newsize;
22610 /* Freeze wide instructions that are at or before the same location as
22611 in the previous pass. This avoids infinite loops.
22612 Don't freeze them unconditionally because targets may be artificially
22613 misaligned by the expansion of preceding frags. */
22614 if (stretch <= 0 && newsize > 2)
22615 {
22616 md_convert_frag (sec->owner, sec, fragp);
22617 frag_wane (fragp);
22618 }
22619
22620 return newsize - oldsize;
22621 }
22622
22623 /* Round up a section size to the appropriate boundary. */
22624
22625 valueT
22626 md_section_align (segT segment ATTRIBUTE_UNUSED,
22627 valueT size)
22628 {
22629 return size;
22630 }
22631
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero bytes (covered by a $d mapping
   symbol on ELF) up to the first instruction boundary, then NOP
   instructions matching the frag's recorded ARM/Thumb mode and the
   selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings, indexed [architecture variant][endianness][byte].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      { /* ARMv1: mov r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      { /* ARMv6k: architected NOP.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      { /* Thumb-1: mov r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      { /* Thumb-2: architected NOP.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    { /* Wide Thumb-2 NOP (nop.w).  */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding required by this alignment frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* Only the residue below the maximum needs filling here.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Non-zero mode bits (beyond MODE_RECORDED) mean Thumb.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: a single narrow NOP for an odd halfword, wide
	     NOPs for the rest.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad up to an instruction boundary with zeros, marked as data.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22751
22752 /* Called from md_do_align. Used to create an alignment
22753 frag in a code section. */
22754
22755 void
22756 arm_frag_align_code (int n, int max)
22757 {
22758 char * p;
22759
22760 /* We assume that there will never be a requirement
22761 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22762 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22763 {
22764 char err_msg[128];
22765
22766 sprintf (err_msg,
22767 _("alignments greater than %d bytes not supported in .text sections."),
22768 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22769 as_fatal ("%s", err_msg);
22770 }
22771
22772 p = frag_var (rs_align_code,
22773 MAX_MEM_FOR_RS_ALIGN_CODE,
22774 1,
22775 (relax_substateT) max,
22776 (symbolS *) NULL,
22777 (offsetT) n,
22778 (char *) NULL);
22779 *p = 0;
22780 }
22781
22782 /* Perform target specific initialisation of a frag.
22783 Note - despite the name this initialisation is not done when the frag
22784 is created, but only when its type is assigned. A frag can be created
22785 and used a long time before its type is set, so beware of assuming that
22786 this initialisation is performed first. */
22787
22788 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, and mark
     the record as valid with MODE_RECORDED.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22795
22796 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* XOR strips MODE_RECORDED (guaranteed set above), leaving the
     recorded ARM/Thumb mode bit.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding counts as data ($d).  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with NOPs in the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22830
22831 /* When we change sections we need to issue a new mapping symbol. */
22832
22833 void
22834 arm_elf_change_section (void)
22835 {
22836 /* Link an unlinked unwind index table section to the .text section. */
22837 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22838 && elf_linked_to_section (now_seg) == NULL)
22839 elf_linked_to_section (now_seg) = text_section;
22840 }
22841
22842 int
22843 arm_elf_section_type (const char * str, size_t len)
22844 {
22845 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22846 return SHT_ARM_EXIDX;
22847
22848 return -1;
22849 }
22850 \f
22851 /* Code to deal with unwinding tables. */
22852
22853 static void add_unwind_adjustsp (offsetT);
22854
22855 /* Generate any deferred unwind frame offset. */
22856
22857 static void
22858 flush_pending_unwind (void)
22859 {
22860 offsetT offset;
22861
22862 offset = unwind.pending_offset;
22863 unwind.pending_offset = 0;
22864 if (offset != 0)
22865 add_unwind_adjustsp (offset);
22866 }
22867
22868 /* Add an opcode to this list for this function. Two-byte opcodes should
22869 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22870 order. */
22871
22872 static void
22873 add_unwind_opcode (valueT op, int length)
22874 {
22875 /* Add any deferred stack adjustment. */
22876 if (unwind.pending_offset)
22877 flush_pending_unwind ();
22878
22879 unwind.sp_restored = 0;
22880
22881 if (unwind.opcode_count + length > unwind.opcode_alloc)
22882 {
22883 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
22884 if (unwind.opcodes)
22885 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
22886 unwind.opcode_alloc);
22887 else
22888 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
22889 }
22890 while (length > 0)
22891 {
22892 length--;
22893 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22894 op >>= 8;
22895 unwind.opcode_count++;
22896 }
22897 }
22898
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive = deallocate, negative = allocate).  Remember that the
   opcode list is built in reverse, so multi-byte sequences are
   appended back-to-front.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero operand still needs one uleb128 byte.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;  /* Continuation bit.  */
	  n++;
	}
      /* Add the insn: uleb128 bytes first (reversed), then 0xb2,
	 so the final order reads 0xb2 followed by the uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: 0x3f (max short pop) then the rest.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f encodes vsp += (op << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: 0x7f chunks of -0x100, then the
	 remainder as 0x40-0x7f (vsp -= (op & 0x3f) << 2 + 4).  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22960
22961 /* Finish the list of unwind opcodes for this function. */
22962
22963 static void
22964 finish_unwind_opcodes (void)
22965 {
22966 valueT op;
22967
22968 if (unwind.fp_used)
22969 {
22970 /* Adjust sp as necessary. */
22971 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22972 flush_pending_unwind ();
22973
22974 /* After restoring sp from the frame pointer. */
22975 op = 0x90 | unwind.fp_reg;
22976 add_unwind_opcode (op, 1);
22977 }
22978 else
22979 flush_pending_unwind ();
22980 }
22981
22982
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry, placed in a .ARM.exidx* section; otherwise the entry goes in a
   .ARM.extab* section.  The section name is derived from TEXT_SEG's name,
   and COMDAT grouping / linkonce status is inherited from TEXT_SEG.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* ".text" itself maps to the bare prefix (e.g. ".ARM.exidx").  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  /* Linkonce text sections get the matching linkonce unwind prefix,
     with the ".gnu.linkonce.t." part stripped from the suffix.  */
  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* concat allocates; the name is owned by the section machinery.  */
  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
23050
23051
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero (table entry emitted into the
   unwind section, its symbol left in unwind.table_entry), or the index
   table value for an inline entry (opcodes packed directly into the
   index table word, or 1 for EXIDX_CANTUNWIND).  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 holds at most 3 opcode bytes inline.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: 0x80 marker
		 byte followed by up to three opcode bytes.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the extra-opcode byte count up to whole words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full; write it out and start a new one.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
23220
23221
/* Initialize the DWARF-2 unwind information for this procedure:
   on function entry the CFA is at SP + 0.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
23229 #endif /* OBJ_ELF */
23230
23231 /* Convert REGNAME to a DWARF-2 register number. */
23232
23233 int
23234 tc_arm_regname_to_dw2regnum (char *regname)
23235 {
23236 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
23237 if (reg != FAIL)
23238 return reg;
23239
23240 /* PR 16694: Allow VFP registers as well. */
23241 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
23242 if (reg != FAIL)
23243 return 64 + reg;
23244
23245 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
23246 if (reg != FAIL)
23247 return reg + 256;
23248
23249 return FAIL;
23250 }
23251
23252 #ifdef TE_PE
23253 void
23254 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
23255 {
23256 expressionS exp;
23257
23258 exp.X_op = O_secrel;
23259 exp.X_add_symbol = symbol;
23260 exp.X_add_number = 0;
23261 emit_expr (&exp, size);
23262 }
23263 #endif
23264
23265 /* MD interface: Symbol and relocation handling. */
23266
23267 /* Return the address within the segment that a PC-relative fixup is
23268 relative to. For ARM, PC-relative fixups applied to instructions
23269 are generally relative to the location of the fixup plus 8 bytes.
23270 Thumb branches are offset by 4, and Thumb loads relative to PC
23271 require special handling. */
23272
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Default base: the address of the fixup itself.  */
  offsetT	base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A branch to a locally defined ARM function on a v5T+ target may
	 be resolved at fixup time (possibly converted for interworking
	 in md_apply_fix), so restore the real base even though it may
	 have been zeroed above.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* Same local-resolution consideration as BRANCH23, but here the
	 destination must be a Thumb function.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* A call to a locally defined Thumb function may be flipped from
	 BL to BLX at fixup time (see md_apply_fix), hence the restored
	 base here too.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
23400
/* When TRUE (the default), arm_tc_equal_in_insn warns about symbol
   assignments whose name matches an ARM instruction mnemonic.
   NOTE(review): presumably cleared by a -mno-warn-syms style command
   line option -- confirm where this is set.  */
static bfd_boolean flag_warn_syms = TRUE;
23402
23403 bfd_boolean
23404 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
23405 {
23406 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23407 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23408 does mean that the resulting code might be very confusing to the reader.
23409 Also this warning can be triggered if the user omits an operand before
23410 an immediate address, eg:
23411
23412 LDR =foo
23413
23414 GAS treats this as an assignment of the value of the symbol foo to a
23415 symbol LDR, and so (without this code) it will not issue any kind of
23416 warning or error message.
23417
23418 Note - ARM instructions are case-insensitive but the strings in the hash
23419 table are all stored in lower case, so we must first ensure that name is
23420 lower case too. */
23421 if (flag_warn_syms && arm_ops_hsh)
23422 {
23423 char * nbuf = strdup (name);
23424 char * p;
23425
23426 for (p = nbuf; *p; p++)
23427 *p = TOLOWER (*p);
23428 if (hash_find (arm_ops_hsh, nbuf) != NULL)
23429 {
23430 static struct hash_control * already_warned = NULL;
23431
23432 if (already_warned == NULL)
23433 already_warned = hash_new ();
23434 /* Only warn about the symbol once. To keep the code
23435 simple we let hash_insert do the lookup for us. */
23436 if (hash_insert (already_warned, nbuf, NULL) == NULL)
23437 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
23438 }
23439 else
23440 free (nbuf);
23441 }
23442
23443 return FALSE;
23444 }
23445
23446 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23447 Otherwise we have no need to default values of symbols. */
23448
23449 symbolS *
23450 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
23451 {
23452 #ifdef OBJ_ELF
23453 if (name[0] == '_' && name[1] == 'G'
23454 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
23455 {
23456 if (!GOT_symbol)
23457 {
23458 if (symbol_find (name))
23459 as_bad (_("GOT already in the symbol table"));
23460
23461 GOT_symbol = symbol_new (name, undefined_section,
23462 (valueT) 0, & zero_address_frag);
23463 }
23464
23465 return GOT_symbol;
23466 }
23467 #endif
23468
23469 return NULL;
23470 }
23471
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.

   On success, returns the ARM "operand2" encoding (an 8-bit constant
   together with an even rotation amount; I << 7 places I/2 in the
   rotate field at bits [11:8]) for the low byte of VAL, and stores the
   encoding for the remaining high byte in *HIGHPART.  Returns FAIL if
   VAL cannot be expressed as the sum of two such immediates.  */

static unsigned int
validate_immediate_twopart (unsigned int	val,
			    unsigned int *	highpart)
{
  unsigned int	a;
  unsigned int	i;

  /* Find the first even rotation that brings a non-zero byte into the
     low 8 bits; that byte becomes the first immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The remainder must fit entirely in the adjacent byte,
	       otherwise two immediates are not enough.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left at this point.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
23510
23511 static int
23512 validate_offset_imm (unsigned int val, int hwse)
23513 {
23514 if ((hwse && val > 255) || val > 4095)
23515 return FAIL;
23516 return val;
23517 }
23518
23519 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23520 negative immediate constant by altering the instruction. A bit of
23521 a hack really.
23522 MOV <-> MVN
23523 AND <-> BIC
23524 ADC <-> SBC
23525 by inverting the second operand, and
23526 ADD <-> SUB
23527 CMP <-> CMN
23528 by negating the second operand. */
23529
23530 static int
23531 negate_data_op (unsigned long * instruction,
23532 unsigned long value)
23533 {
23534 int op, new_inst;
23535 unsigned long negated, inverted;
23536
23537 negated = encode_arm_immediate (-value);
23538 inverted = encode_arm_immediate (~value);
23539
23540 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
23541 switch (op)
23542 {
23543 /* First negates. */
23544 case OPCODE_SUB: /* ADD <-> SUB */
23545 new_inst = OPCODE_ADD;
23546 value = negated;
23547 break;
23548
23549 case OPCODE_ADD:
23550 new_inst = OPCODE_SUB;
23551 value = negated;
23552 break;
23553
23554 case OPCODE_CMP: /* CMP <-> CMN */
23555 new_inst = OPCODE_CMN;
23556 value = negated;
23557 break;
23558
23559 case OPCODE_CMN:
23560 new_inst = OPCODE_CMP;
23561 value = negated;
23562 break;
23563
23564 /* Now Inverted ops. */
23565 case OPCODE_MOV: /* MOV <-> MVN */
23566 new_inst = OPCODE_MVN;
23567 value = inverted;
23568 break;
23569
23570 case OPCODE_MVN:
23571 new_inst = OPCODE_MOV;
23572 value = inverted;
23573 break;
23574
23575 case OPCODE_AND: /* AND <-> BIC */
23576 new_inst = OPCODE_BIC;
23577 value = inverted;
23578 break;
23579
23580 case OPCODE_BIC:
23581 new_inst = OPCODE_AND;
23582 value = inverted;
23583 break;
23584
23585 case OPCODE_ADC: /* ADC <-> SBC */
23586 new_inst = OPCODE_SBC;
23587 value = inverted;
23588 break;
23589
23590 case OPCODE_SBC:
23591 new_inst = OPCODE_ADC;
23592 value = inverted;
23593 break;
23594
23595 /* We cannot do anything. */
23596 default:
23597 return FAIL;
23598 }
23599
23600 if (value == (unsigned) FAIL)
23601 return FAIL;
23602
23603 *instruction &= OPCODE_MASK;
23604 *instruction |= new_inst << DATA_OP_SHIFT;
23605 return value;
23606 }
23607
23608 /* Like negate_data_op, but for Thumb-2. */
23609
23610 static unsigned int
23611 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
23612 {
23613 int op, new_inst;
23614 int rd;
23615 unsigned int negated, inverted;
23616
23617 negated = encode_thumb32_immediate (-value);
23618 inverted = encode_thumb32_immediate (~value);
23619
23620 rd = (*instruction >> 8) & 0xf;
23621 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
23622 switch (op)
23623 {
23624 /* ADD <-> SUB. Includes CMP <-> CMN. */
23625 case T2_OPCODE_SUB:
23626 new_inst = T2_OPCODE_ADD;
23627 value = negated;
23628 break;
23629
23630 case T2_OPCODE_ADD:
23631 new_inst = T2_OPCODE_SUB;
23632 value = negated;
23633 break;
23634
23635 /* ORR <-> ORN. Includes MOV <-> MVN. */
23636 case T2_OPCODE_ORR:
23637 new_inst = T2_OPCODE_ORN;
23638 value = inverted;
23639 break;
23640
23641 case T2_OPCODE_ORN:
23642 new_inst = T2_OPCODE_ORR;
23643 value = inverted;
23644 break;
23645
23646 /* AND <-> BIC. TST has no inverted equivalent. */
23647 case T2_OPCODE_AND:
23648 new_inst = T2_OPCODE_BIC;
23649 if (rd == 15)
23650 value = FAIL;
23651 else
23652 value = inverted;
23653 break;
23654
23655 case T2_OPCODE_BIC:
23656 new_inst = T2_OPCODE_AND;
23657 value = inverted;
23658 break;
23659
23660 /* ADC <-> SBC */
23661 case T2_OPCODE_ADC:
23662 new_inst = T2_OPCODE_SBC;
23663 value = inverted;
23664 break;
23665
23666 case T2_OPCODE_SBC:
23667 new_inst = T2_OPCODE_ADC;
23668 value = inverted;
23669 break;
23670
23671 /* We cannot do anything. */
23672 default:
23673 return FAIL;
23674 }
23675
23676 if (value == (unsigned int)FAIL)
23677 return FAIL;
23678
23679 *instruction &= T2_OPCODE_MASK;
23680 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23681 return value;
23682 }
23683
23684 /* Read a 32-bit thumb instruction from buf. */
23685
23686 static unsigned long
23687 get_thumb32_insn (char * buf)
23688 {
23689 unsigned long insn;
23690 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23691 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23692
23693 return insn;
23694 }
23695
23696 /* We usually want to set the low bit on the address of thumb function
23697 symbols. In particular .word foo - . should have the low bit set.
23698 Generic code tries to fold the difference of two symbols to
23699 a constant. Prevent this and force a relocation when the first symbols
23700 is a thumb function. */
23701
23702 bfd_boolean
23703 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23704 {
23705 if (op == O_subtract
23706 && l->X_op == O_symbol
23707 && r->X_op == O_symbol
23708 && THUMB_IS_FUNC (l->X_add_symbol))
23709 {
23710 l->X_op = O_subtract;
23711 l->X_op_symbol = r->X_add_symbol;
23712 l->X_add_number -= r->X_add_number;
23713 return TRUE;
23714 }
23715
23716 /* Process as normal. */
23717 return FALSE;
23718 }
23719
23720 /* Encode Thumb2 unconditional branches and calls. The encoding
23721 for the 2 are identical for the immediate values. */
23722
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Bit positions of the J1/J2 fields in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the byte offset into sign bit S, I1/I2, a 10-bit high part
     and an 11-bit low part.  Bit 0 of VALUE is dropped (targets are
     halfword aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  /* Merge the fields into the two existing instruction halfwords.  */
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1 = I1 XNOR S and J2 = I2 XNOR S; the trailing XOR with the mask
     performs the final inversion of both bits at once.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23744
23745 void
23746 md_apply_fix (fixS * fixP,
23747 valueT * valP,
23748 segT seg)
23749 {
23750 offsetT value = * valP;
23751 offsetT newval;
23752 unsigned int newimm;
23753 unsigned long temp;
23754 int sign;
23755 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23756
23757 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23758
23759 /* Note whether this will delete the relocation. */
23760
23761 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23762 fixP->fx_done = 1;
23763
23764 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23765 consistency with the behaviour on 32-bit hosts. Remember value
23766 for emit_reloc. */
23767 value &= 0xffffffff;
23768 value ^= 0x80000000;
23769 value -= 0x80000000;
23770
23771 *valP = value;
23772 fixP->fx_addnumber = value;
23773
23774 /* Same treatment for fixP->fx_offset. */
23775 fixP->fx_offset &= 0xffffffff;
23776 fixP->fx_offset ^= 0x80000000;
23777 fixP->fx_offset -= 0x80000000;
23778
23779 switch (fixP->fx_r_type)
23780 {
23781 case BFD_RELOC_NONE:
23782 /* This will need to go in the object file. */
23783 fixP->fx_done = 0;
23784 break;
23785
23786 case BFD_RELOC_ARM_IMMEDIATE:
23787 /* We claim that this fixup has been processed here,
23788 even if in fact we generate an error because we do
23789 not have a reloc for it, so tc_gen_reloc will reject it. */
23790 fixP->fx_done = 1;
23791
23792 if (fixP->fx_addsy)
23793 {
23794 const char *msg = 0;
23795
23796 if (! S_IS_DEFINED (fixP->fx_addsy))
23797 msg = _("undefined symbol %s used as an immediate value");
23798 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23799 msg = _("symbol %s is in a different section");
23800 else if (S_IS_WEAK (fixP->fx_addsy))
23801 msg = _("symbol %s is weak and may be overridden later");
23802
23803 if (msg)
23804 {
23805 as_bad_where (fixP->fx_file, fixP->fx_line,
23806 msg, S_GET_NAME (fixP->fx_addsy));
23807 break;
23808 }
23809 }
23810
23811 temp = md_chars_to_number (buf, INSN_SIZE);
23812
23813 /* If the offset is negative, we should use encoding A2 for ADR. */
23814 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23815 newimm = negate_data_op (&temp, value);
23816 else
23817 {
23818 newimm = encode_arm_immediate (value);
23819
23820 /* If the instruction will fail, see if we can fix things up by
23821 changing the opcode. */
23822 if (newimm == (unsigned int) FAIL)
23823 newimm = negate_data_op (&temp, value);
23824 /* MOV accepts both ARM modified immediate (A1 encoding) and
23825 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23826 When disassembling, MOV is preferred when there is no encoding
23827 overlap. */
23828 if (newimm == (unsigned int) FAIL
23829 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23830 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23831 && !((temp >> SBIT_SHIFT) & 0x1)
23832 && value >= 0 && value <= 0xffff)
23833 {
23834 /* Clear bits[23:20] to change encoding from A1 to A2. */
23835 temp &= 0xff0fffff;
23836 /* Encoding high 4bits imm. Code below will encode the remaining
23837 low 12bits. */
23838 temp |= (value & 0x0000f000) << 4;
23839 newimm = value & 0x00000fff;
23840 }
23841 }
23842
23843 if (newimm == (unsigned int) FAIL)
23844 {
23845 as_bad_where (fixP->fx_file, fixP->fx_line,
23846 _("invalid constant (%lx) after fixup"),
23847 (unsigned long) value);
23848 break;
23849 }
23850
23851 newimm |= (temp & 0xfffff000);
23852 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23853 break;
23854
23855 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23856 {
23857 unsigned int highpart = 0;
23858 unsigned int newinsn = 0xe1a00000; /* nop. */
23859
23860 if (fixP->fx_addsy)
23861 {
23862 const char *msg = 0;
23863
23864 if (! S_IS_DEFINED (fixP->fx_addsy))
23865 msg = _("undefined symbol %s used as an immediate value");
23866 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23867 msg = _("symbol %s is in a different section");
23868 else if (S_IS_WEAK (fixP->fx_addsy))
23869 msg = _("symbol %s is weak and may be overridden later");
23870
23871 if (msg)
23872 {
23873 as_bad_where (fixP->fx_file, fixP->fx_line,
23874 msg, S_GET_NAME (fixP->fx_addsy));
23875 break;
23876 }
23877 }
23878
23879 newimm = encode_arm_immediate (value);
23880 temp = md_chars_to_number (buf, INSN_SIZE);
23881
23882 /* If the instruction will fail, see if we can fix things up by
23883 changing the opcode. */
23884 if (newimm == (unsigned int) FAIL
23885 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23886 {
23887 /* No ? OK - try using two ADD instructions to generate
23888 the value. */
23889 newimm = validate_immediate_twopart (value, & highpart);
23890
23891 /* Yes - then make sure that the second instruction is
23892 also an add. */
23893 if (newimm != (unsigned int) FAIL)
23894 newinsn = temp;
23895 /* Still No ? Try using a negated value. */
23896 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23897 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23898 /* Otherwise - give up. */
23899 else
23900 {
23901 as_bad_where (fixP->fx_file, fixP->fx_line,
23902 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23903 (long) value);
23904 break;
23905 }
23906
23907 /* Replace the first operand in the 2nd instruction (which
23908 is the PC) with the destination register. We have
23909 already added in the PC in the first instruction and we
23910 do not want to do it again. */
23911 newinsn &= ~ 0xf0000;
23912 newinsn |= ((newinsn & 0x0f000) << 4);
23913 }
23914
23915 newimm |= (temp & 0xfffff000);
23916 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23917
23918 highpart |= (newinsn & 0xfffff000);
23919 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23920 }
23921 break;
23922
23923 case BFD_RELOC_ARM_OFFSET_IMM:
23924 if (!fixP->fx_done && seg->use_rela_p)
23925 value = 0;
23926 /* Fall through. */
23927
23928 case BFD_RELOC_ARM_LITERAL:
23929 sign = value > 0;
23930
23931 if (value < 0)
23932 value = - value;
23933
23934 if (validate_offset_imm (value, 0) == FAIL)
23935 {
23936 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23937 as_bad_where (fixP->fx_file, fixP->fx_line,
23938 _("invalid literal constant: pool needs to be closer"));
23939 else
23940 as_bad_where (fixP->fx_file, fixP->fx_line,
23941 _("bad immediate value for offset (%ld)"),
23942 (long) value);
23943 break;
23944 }
23945
23946 newval = md_chars_to_number (buf, INSN_SIZE);
23947 if (value == 0)
23948 newval &= 0xfffff000;
23949 else
23950 {
23951 newval &= 0xff7ff000;
23952 newval |= value | (sign ? INDEX_UP : 0);
23953 }
23954 md_number_to_chars (buf, newval, INSN_SIZE);
23955 break;
23956
23957 case BFD_RELOC_ARM_OFFSET_IMM8:
23958 case BFD_RELOC_ARM_HWLITERAL:
23959 sign = value > 0;
23960
23961 if (value < 0)
23962 value = - value;
23963
23964 if (validate_offset_imm (value, 1) == FAIL)
23965 {
23966 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23967 as_bad_where (fixP->fx_file, fixP->fx_line,
23968 _("invalid literal constant: pool needs to be closer"));
23969 else
23970 as_bad_where (fixP->fx_file, fixP->fx_line,
23971 _("bad immediate value for 8-bit offset (%ld)"),
23972 (long) value);
23973 break;
23974 }
23975
23976 newval = md_chars_to_number (buf, INSN_SIZE);
23977 if (value == 0)
23978 newval &= 0xfffff0f0;
23979 else
23980 {
23981 newval &= 0xff7ff0f0;
23982 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23983 }
23984 md_number_to_chars (buf, newval, INSN_SIZE);
23985 break;
23986
23987 case BFD_RELOC_ARM_T32_OFFSET_U8:
23988 if (value < 0 || value > 1020 || value % 4 != 0)
23989 as_bad_where (fixP->fx_file, fixP->fx_line,
23990 _("bad immediate value for offset (%ld)"), (long) value);
23991 value /= 4;
23992
23993 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23994 newval |= value;
23995 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23996 break;
23997
23998 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23999 /* This is a complicated relocation used for all varieties of Thumb32
24000 load/store instruction with immediate offset:
24001
24002 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24003 *4, optional writeback(W)
24004 (doubleword load/store)
24005
24006 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24007 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24008 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24009 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24010 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24011
24012 Uppercase letters indicate bits that are already encoded at
24013 this point. Lowercase letters are our problem. For the
24014 second block of instructions, the secondary opcode nybble
24015 (bits 8..11) is present, and bit 23 is zero, even if this is
24016 a PC-relative operation. */
24017 newval = md_chars_to_number (buf, THUMB_SIZE);
24018 newval <<= 16;
24019 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
24020
24021 if ((newval & 0xf0000000) == 0xe0000000)
24022 {
24023 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24024 if (value >= 0)
24025 newval |= (1 << 23);
24026 else
24027 value = -value;
24028 if (value % 4 != 0)
24029 {
24030 as_bad_where (fixP->fx_file, fixP->fx_line,
24031 _("offset not a multiple of 4"));
24032 break;
24033 }
24034 value /= 4;
24035 if (value > 0xff)
24036 {
24037 as_bad_where (fixP->fx_file, fixP->fx_line,
24038 _("offset out of range"));
24039 break;
24040 }
24041 newval &= ~0xff;
24042 }
24043 else if ((newval & 0x000f0000) == 0x000f0000)
24044 {
24045 /* PC-relative, 12-bit offset. */
24046 if (value >= 0)
24047 newval |= (1 << 23);
24048 else
24049 value = -value;
24050 if (value > 0xfff)
24051 {
24052 as_bad_where (fixP->fx_file, fixP->fx_line,
24053 _("offset out of range"));
24054 break;
24055 }
24056 newval &= ~0xfff;
24057 }
24058 else if ((newval & 0x00000100) == 0x00000100)
24059 {
24060 /* Writeback: 8-bit, +/- offset. */
24061 if (value >= 0)
24062 newval |= (1 << 9);
24063 else
24064 value = -value;
24065 if (value > 0xff)
24066 {
24067 as_bad_where (fixP->fx_file, fixP->fx_line,
24068 _("offset out of range"));
24069 break;
24070 }
24071 newval &= ~0xff;
24072 }
24073 else if ((newval & 0x00000f00) == 0x00000e00)
24074 {
24075 /* T-instruction: positive 8-bit offset. */
24076 if (value < 0 || value > 0xff)
24077 {
24078 as_bad_where (fixP->fx_file, fixP->fx_line,
24079 _("offset out of range"));
24080 break;
24081 }
24082 newval &= ~0xff;
24083 newval |= value;
24084 }
24085 else
24086 {
24087 /* Positive 12-bit or negative 8-bit offset. */
24088 int limit;
24089 if (value >= 0)
24090 {
24091 newval |= (1 << 23);
24092 limit = 0xfff;
24093 }
24094 else
24095 {
24096 value = -value;
24097 limit = 0xff;
24098 }
24099 if (value > limit)
24100 {
24101 as_bad_where (fixP->fx_file, fixP->fx_line,
24102 _("offset out of range"));
24103 break;
24104 }
24105 newval &= ~limit;
24106 }
24107
24108 newval |= value;
24109 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
24110 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
24111 break;
24112
24113 case BFD_RELOC_ARM_SHIFT_IMM:
24114 newval = md_chars_to_number (buf, INSN_SIZE);
24115 if (((unsigned long) value) > 32
24116 || (value == 32
24117 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
24118 {
24119 as_bad_where (fixP->fx_file, fixP->fx_line,
24120 _("shift expression is too large"));
24121 break;
24122 }
24123
24124 if (value == 0)
24125 /* Shifts of zero must be done as lsl. */
24126 newval &= ~0x60;
24127 else if (value == 32)
24128 value = 0;
24129 newval &= 0xfffff07f;
24130 newval |= (value & 0x1f) << 7;
24131 md_number_to_chars (buf, newval, INSN_SIZE);
24132 break;
24133
24134 case BFD_RELOC_ARM_T32_IMMEDIATE:
24135 case BFD_RELOC_ARM_T32_ADD_IMM:
24136 case BFD_RELOC_ARM_T32_IMM12:
24137 case BFD_RELOC_ARM_T32_ADD_PC12:
24138 /* We claim that this fixup has been processed here,
24139 even if in fact we generate an error because we do
24140 not have a reloc for it, so tc_gen_reloc will reject it. */
24141 fixP->fx_done = 1;
24142
24143 if (fixP->fx_addsy
24144 && ! S_IS_DEFINED (fixP->fx_addsy))
24145 {
24146 as_bad_where (fixP->fx_file, fixP->fx_line,
24147 _("undefined symbol %s used as an immediate value"),
24148 S_GET_NAME (fixP->fx_addsy));
24149 break;
24150 }
24151
24152 newval = md_chars_to_number (buf, THUMB_SIZE);
24153 newval <<= 16;
24154 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
24155
24156 newimm = FAIL;
24157 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24158 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24159 Thumb2 modified immediate encoding (T2). */
24160 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
24161 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24162 {
24163 newimm = encode_thumb32_immediate (value);
24164 if (newimm == (unsigned int) FAIL)
24165 newimm = thumb32_negate_data_op (&newval, value);
24166 }
24167 if (newimm == (unsigned int) FAIL)
24168 {
24169 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
24170 {
24171 /* Turn add/sum into addw/subw. */
24172 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24173 newval = (newval & 0xfeffffff) | 0x02000000;
24174 /* No flat 12-bit imm encoding for addsw/subsw. */
24175 if ((newval & 0x00100000) == 0)
24176 {
24177 /* 12 bit immediate for addw/subw. */
24178 if (value < 0)
24179 {
24180 value = -value;
24181 newval ^= 0x00a00000;
24182 }
24183 if (value > 0xfff)
24184 newimm = (unsigned int) FAIL;
24185 else
24186 newimm = value;
24187 }
24188 }
24189 else
24190 {
24191 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24192 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24193 disassembling, MOV is preferred when there is no encoding
24194 overlap. */
24195 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
24196 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24197 but with the Rn field [19:16] set to 1111. */
24198 && (((newval >> 16) & 0xf) == 0xf)
24199 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
24200 && !((newval >> T2_SBIT_SHIFT) & 0x1)
24201 && value >= 0 && value <= 0xffff)
24202 {
24203 /* Toggle bit[25] to change encoding from T2 to T3. */
24204 newval ^= 1 << 25;
24205 /* Clear bits[19:16]. */
24206 newval &= 0xfff0ffff;
24207 /* Encoding high 4bits imm. Code below will encode the
24208 remaining low 12bits. */
24209 newval |= (value & 0x0000f000) << 4;
24210 newimm = value & 0x00000fff;
24211 }
24212 }
24213 }
24214
24215 if (newimm == (unsigned int)FAIL)
24216 {
24217 as_bad_where (fixP->fx_file, fixP->fx_line,
24218 _("invalid constant (%lx) after fixup"),
24219 (unsigned long) value);
24220 break;
24221 }
24222
24223 newval |= (newimm & 0x800) << 15;
24224 newval |= (newimm & 0x700) << 4;
24225 newval |= (newimm & 0x0ff);
24226
24227 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
24228 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
24229 break;
24230
24231 case BFD_RELOC_ARM_SMC:
24232 if (((unsigned long) value) > 0xffff)
24233 as_bad_where (fixP->fx_file, fixP->fx_line,
24234 _("invalid smc expression"));
24235 newval = md_chars_to_number (buf, INSN_SIZE);
24236 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24237 md_number_to_chars (buf, newval, INSN_SIZE);
24238 break;
24239
24240 case BFD_RELOC_ARM_HVC:
24241 if (((unsigned long) value) > 0xffff)
24242 as_bad_where (fixP->fx_file, fixP->fx_line,
24243 _("invalid hvc expression"));
24244 newval = md_chars_to_number (buf, INSN_SIZE);
24245 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24246 md_number_to_chars (buf, newval, INSN_SIZE);
24247 break;
24248
24249 case BFD_RELOC_ARM_SWI:
24250 if (fixP->tc_fix_data != 0)
24251 {
24252 if (((unsigned long) value) > 0xff)
24253 as_bad_where (fixP->fx_file, fixP->fx_line,
24254 _("invalid swi expression"));
24255 newval = md_chars_to_number (buf, THUMB_SIZE);
24256 newval |= value;
24257 md_number_to_chars (buf, newval, THUMB_SIZE);
24258 }
24259 else
24260 {
24261 if (((unsigned long) value) > 0x00ffffff)
24262 as_bad_where (fixP->fx_file, fixP->fx_line,
24263 _("invalid swi expression"));
24264 newval = md_chars_to_number (buf, INSN_SIZE);
24265 newval |= value;
24266 md_number_to_chars (buf, newval, INSN_SIZE);
24267 }
24268 break;
24269
24270 case BFD_RELOC_ARM_MULTI:
24271 if (((unsigned long) value) > 0xffff)
24272 as_bad_where (fixP->fx_file, fixP->fx_line,
24273 _("invalid expression in load/store multiple"));
24274 newval = value | md_chars_to_number (buf, INSN_SIZE);
24275 md_number_to_chars (buf, newval, INSN_SIZE);
24276 break;
24277
24278 #ifdef OBJ_ELF
24279 case BFD_RELOC_ARM_PCREL_CALL:
24280
24281 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24282 && fixP->fx_addsy
24283 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24284 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24285 && THUMB_IS_FUNC (fixP->fx_addsy))
24286 /* Flip the bl to blx. This is a simple flip
24287 bit here because we generate PCREL_CALL for
24288 unconditional bls. */
24289 {
24290 newval = md_chars_to_number (buf, INSN_SIZE);
24291 newval = newval | 0x10000000;
24292 md_number_to_chars (buf, newval, INSN_SIZE);
24293 temp = 1;
24294 fixP->fx_done = 1;
24295 }
24296 else
24297 temp = 3;
24298 goto arm_branch_common;
24299
24300 case BFD_RELOC_ARM_PCREL_JUMP:
24301 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24302 && fixP->fx_addsy
24303 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24304 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24305 && THUMB_IS_FUNC (fixP->fx_addsy))
24306 {
24307 /* This would map to a bl<cond>, b<cond>,
24308 b<always> to a Thumb function. We
24309 need to force a relocation for this particular
24310 case. */
24311 newval = md_chars_to_number (buf, INSN_SIZE);
24312 fixP->fx_done = 0;
24313 }
24314 /* Fall through. */
24315
24316 case BFD_RELOC_ARM_PLT32:
24317 #endif
24318 case BFD_RELOC_ARM_PCREL_BRANCH:
24319 temp = 3;
24320 goto arm_branch_common;
24321
24322 case BFD_RELOC_ARM_PCREL_BLX:
24323
24324 temp = 1;
24325 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24326 && fixP->fx_addsy
24327 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24328 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24329 && ARM_IS_FUNC (fixP->fx_addsy))
24330 {
24331 /* Flip the blx to a bl and warn. */
24332 const char *name = S_GET_NAME (fixP->fx_addsy);
24333 newval = 0xeb000000;
24334 as_warn_where (fixP->fx_file, fixP->fx_line,
24335 _("blx to '%s' an ARM ISA state function changed to bl"),
24336 name);
24337 md_number_to_chars (buf, newval, INSN_SIZE);
24338 temp = 3;
24339 fixP->fx_done = 1;
24340 }
24341
24342 #ifdef OBJ_ELF
24343 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24344 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
24345 #endif
24346
24347 arm_branch_common:
24348 /* We are going to store value (shifted right by two) in the
24349 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24350 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24351 also be clear. */
24352 if (value & temp)
24353 as_bad_where (fixP->fx_file, fixP->fx_line,
24354 _("misaligned branch destination"));
24355 if ((value & (offsetT)0xfe000000) != (offsetT)0
24356 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
24357 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24358
24359 if (fixP->fx_done || !seg->use_rela_p)
24360 {
24361 newval = md_chars_to_number (buf, INSN_SIZE);
24362 newval |= (value >> 2) & 0x00ffffff;
24363 /* Set the H bit on BLX instructions. */
24364 if (temp == 1)
24365 {
24366 if (value & 2)
24367 newval |= 0x01000000;
24368 else
24369 newval &= ~0x01000000;
24370 }
24371 md_number_to_chars (buf, newval, INSN_SIZE);
24372 }
24373 break;
24374
24375 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
24376 /* CBZ can only branch forward. */
24377
24378 /* Attempts to use CBZ to branch to the next instruction
24379 (which, strictly speaking, are prohibited) will be turned into
24380 no-ops.
24381
24382 FIXME: It may be better to remove the instruction completely and
24383 perform relaxation. */
24384 if (value == -2)
24385 {
24386 newval = md_chars_to_number (buf, THUMB_SIZE);
24387 newval = 0xbf00; /* NOP encoding T1 */
24388 md_number_to_chars (buf, newval, THUMB_SIZE);
24389 }
24390 else
24391 {
24392 if (value & ~0x7e)
24393 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24394
24395 if (fixP->fx_done || !seg->use_rela_p)
24396 {
24397 newval = md_chars_to_number (buf, THUMB_SIZE);
24398 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
24399 md_number_to_chars (buf, newval, THUMB_SIZE);
24400 }
24401 }
24402 break;
24403
24404 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
24405 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
24406 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24407
24408 if (fixP->fx_done || !seg->use_rela_p)
24409 {
24410 newval = md_chars_to_number (buf, THUMB_SIZE);
24411 newval |= (value & 0x1ff) >> 1;
24412 md_number_to_chars (buf, newval, THUMB_SIZE);
24413 }
24414 break;
24415
24416 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
24417 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
24418 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24419
24420 if (fixP->fx_done || !seg->use_rela_p)
24421 {
24422 newval = md_chars_to_number (buf, THUMB_SIZE);
24423 newval |= (value & 0xfff) >> 1;
24424 md_number_to_chars (buf, newval, THUMB_SIZE);
24425 }
24426 break;
24427
24428 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24429 if (fixP->fx_addsy
24430 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24431 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24432 && ARM_IS_FUNC (fixP->fx_addsy)
24433 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24434 {
24435 /* Force a relocation for a branch 20 bits wide. */
24436 fixP->fx_done = 0;
24437 }
24438 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
24439 as_bad_where (fixP->fx_file, fixP->fx_line,
24440 _("conditional branch out of range"));
24441
24442 if (fixP->fx_done || !seg->use_rela_p)
24443 {
24444 offsetT newval2;
24445 addressT S, J1, J2, lo, hi;
24446
24447 S = (value & 0x00100000) >> 20;
24448 J2 = (value & 0x00080000) >> 19;
24449 J1 = (value & 0x00040000) >> 18;
24450 hi = (value & 0x0003f000) >> 12;
24451 lo = (value & 0x00000ffe) >> 1;
24452
24453 newval = md_chars_to_number (buf, THUMB_SIZE);
24454 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24455 newval |= (S << 10) | hi;
24456 newval2 |= (J1 << 13) | (J2 << 11) | lo;
24457 md_number_to_chars (buf, newval, THUMB_SIZE);
24458 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
24459 }
24460 break;
24461
24462 case BFD_RELOC_THUMB_PCREL_BLX:
24463 /* If there is a blx from a thumb state function to
24464 another thumb function flip this to a bl and warn
24465 about it. */
24466
24467 if (fixP->fx_addsy
24468 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24469 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24470 && THUMB_IS_FUNC (fixP->fx_addsy))
24471 {
24472 const char *name = S_GET_NAME (fixP->fx_addsy);
24473 as_warn_where (fixP->fx_file, fixP->fx_line,
24474 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24475 name);
24476 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24477 newval = newval | 0x1000;
24478 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24479 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24480 fixP->fx_done = 1;
24481 }
24482
24483
24484 goto thumb_bl_common;
24485
24486 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24487 /* A bl from Thumb state ISA to an internal ARM state function
24488 is converted to a blx. */
24489 if (fixP->fx_addsy
24490 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24491 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24492 && ARM_IS_FUNC (fixP->fx_addsy)
24493 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24494 {
24495 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24496 newval = newval & ~0x1000;
24497 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24498 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
24499 fixP->fx_done = 1;
24500 }
24501
24502 thumb_bl_common:
24503
24504 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24505 /* For a BLX instruction, make sure that the relocation is rounded up
24506 to a word boundary. This follows the semantics of the instruction
24507 which specifies that bit 1 of the target address will come from bit
24508 1 of the base address. */
24509 value = (value + 3) & ~ 3;
24510
24511 #ifdef OBJ_ELF
24512 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
24513 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24514 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24515 #endif
24516
24517 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
24518 {
24519 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
24520 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24521 else if ((value & ~0x1ffffff)
24522 && ((value & ~0x1ffffff) != ~0x1ffffff))
24523 as_bad_where (fixP->fx_file, fixP->fx_line,
24524 _("Thumb2 branch out of range"));
24525 }
24526
24527 if (fixP->fx_done || !seg->use_rela_p)
24528 encode_thumb2_b_bl_offset (buf, value);
24529
24530 break;
24531
24532 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24533 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
24534 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24535
24536 if (fixP->fx_done || !seg->use_rela_p)
24537 encode_thumb2_b_bl_offset (buf, value);
24538
24539 break;
24540
24541 case BFD_RELOC_8:
24542 if (fixP->fx_done || !seg->use_rela_p)
24543 *buf = value;
24544 break;
24545
24546 case BFD_RELOC_16:
24547 if (fixP->fx_done || !seg->use_rela_p)
24548 md_number_to_chars (buf, value, 2);
24549 break;
24550
24551 #ifdef OBJ_ELF
24552 case BFD_RELOC_ARM_TLS_CALL:
24553 case BFD_RELOC_ARM_THM_TLS_CALL:
24554 case BFD_RELOC_ARM_TLS_DESCSEQ:
24555 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24556 case BFD_RELOC_ARM_TLS_GOTDESC:
24557 case BFD_RELOC_ARM_TLS_GD32:
24558 case BFD_RELOC_ARM_TLS_LE32:
24559 case BFD_RELOC_ARM_TLS_IE32:
24560 case BFD_RELOC_ARM_TLS_LDM32:
24561 case BFD_RELOC_ARM_TLS_LDO32:
24562 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24563 break;
24564
24565 /* Same handling as above, but with the arm_fdpic guard. */
24566 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24567 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24568 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24569 if (arm_fdpic)
24570 {
24571 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24572 }
24573 else
24574 {
24575 as_bad_where (fixP->fx_file, fixP->fx_line,
24576 _("Relocation supported only in FDPIC mode"));
24577 }
24578 break;
24579
24580 case BFD_RELOC_ARM_GOT32:
24581 case BFD_RELOC_ARM_GOTOFF:
24582 break;
24583
24584 case BFD_RELOC_ARM_GOT_PREL:
24585 if (fixP->fx_done || !seg->use_rela_p)
24586 md_number_to_chars (buf, value, 4);
24587 break;
24588
24589 case BFD_RELOC_ARM_TARGET2:
24590 /* TARGET2 is not partial-inplace, so we need to write the
24591 addend here for REL targets, because it won't be written out
24592 during reloc processing later. */
24593 if (fixP->fx_done || !seg->use_rela_p)
24594 md_number_to_chars (buf, fixP->fx_offset, 4);
24595 break;
24596
24597 /* Relocations for FDPIC. */
24598 case BFD_RELOC_ARM_GOTFUNCDESC:
24599 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24600 case BFD_RELOC_ARM_FUNCDESC:
24601 if (arm_fdpic)
24602 {
24603 if (fixP->fx_done || !seg->use_rela_p)
24604 md_number_to_chars (buf, 0, 4);
24605 }
24606 else
24607 {
24608 as_bad_where (fixP->fx_file, fixP->fx_line,
24609 _("Relocation supported only in FDPIC mode"));
24610 }
24611 break;
24612 #endif
24613
24614 case BFD_RELOC_RVA:
24615 case BFD_RELOC_32:
24616 case BFD_RELOC_ARM_TARGET1:
24617 case BFD_RELOC_ARM_ROSEGREL32:
24618 case BFD_RELOC_ARM_SBREL32:
24619 case BFD_RELOC_32_PCREL:
24620 #ifdef TE_PE
24621 case BFD_RELOC_32_SECREL:
24622 #endif
24623 if (fixP->fx_done || !seg->use_rela_p)
24624 #ifdef TE_WINCE
24625 /* For WinCE we only do this for pcrel fixups. */
24626 if (fixP->fx_done || fixP->fx_pcrel)
24627 #endif
24628 md_number_to_chars (buf, value, 4);
24629 break;
24630
24631 #ifdef OBJ_ELF
24632 case BFD_RELOC_ARM_PREL31:
24633 if (fixP->fx_done || !seg->use_rela_p)
24634 {
24635 newval = md_chars_to_number (buf, 4) & 0x80000000;
24636 if ((value ^ (value >> 1)) & 0x40000000)
24637 {
24638 as_bad_where (fixP->fx_file, fixP->fx_line,
24639 _("rel31 relocation overflow"));
24640 }
24641 newval |= value & 0x7fffffff;
24642 md_number_to_chars (buf, newval, 4);
24643 }
24644 break;
24645 #endif
24646
24647 case BFD_RELOC_ARM_CP_OFF_IMM:
24648 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24649 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
24650 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24651 newval = md_chars_to_number (buf, INSN_SIZE);
24652 else
24653 newval = get_thumb32_insn (buf);
24654 if ((newval & 0x0f200f00) == 0x0d000900)
24655 {
24656 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24657 has permitted values that are multiples of 2, in the range 0
24658 to 510. */
24659 if (value < -510 || value > 510 || (value & 1))
24660 as_bad_where (fixP->fx_file, fixP->fx_line,
24661 _("co-processor offset out of range"));
24662 }
24663 else if ((newval & 0xfe001f80) == 0xec000f80)
24664 {
24665 if (value < -511 || value > 512 || (value & 3))
24666 as_bad_where (fixP->fx_file, fixP->fx_line,
24667 _("co-processor offset out of range"));
24668 }
24669 else if (value < -1023 || value > 1023 || (value & 3))
24670 as_bad_where (fixP->fx_file, fixP->fx_line,
24671 _("co-processor offset out of range"));
24672 cp_off_common:
24673 sign = value > 0;
24674 if (value < 0)
24675 value = -value;
24676 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24677 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24678 newval = md_chars_to_number (buf, INSN_SIZE);
24679 else
24680 newval = get_thumb32_insn (buf);
24681 if (value == 0)
24682 {
24683 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
24684 newval &= 0xffffff80;
24685 else
24686 newval &= 0xffffff00;
24687 }
24688 else
24689 {
24690 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
24691 newval &= 0xff7fff80;
24692 else
24693 newval &= 0xff7fff00;
24694 if ((newval & 0x0f200f00) == 0x0d000900)
24695 {
24696 /* This is a fp16 vstr/vldr.
24697
24698 It requires the immediate offset in the instruction is shifted
24699 left by 1 to be a half-word offset.
24700
24701 Here, left shift by 1 first, and later right shift by 2
24702 should get the right offset. */
24703 value <<= 1;
24704 }
24705 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24706 }
24707 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24708 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24709 md_number_to_chars (buf, newval, INSN_SIZE);
24710 else
24711 put_thumb32_insn (buf, newval);
24712 break;
24713
24714 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24715 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24716 if (value < -255 || value > 255)
24717 as_bad_where (fixP->fx_file, fixP->fx_line,
24718 _("co-processor offset out of range"));
24719 value *= 4;
24720 goto cp_off_common;
24721
24722 case BFD_RELOC_ARM_THUMB_OFFSET:
24723 newval = md_chars_to_number (buf, THUMB_SIZE);
24724 /* Exactly what ranges, and where the offset is inserted depends
24725 on the type of instruction, we can establish this from the
24726 top 4 bits. */
24727 switch (newval >> 12)
24728 {
24729 case 4: /* PC load. */
24730 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24731 forced to zero for these loads; md_pcrel_from has already
24732 compensated for this. */
24733 if (value & 3)
24734 as_bad_where (fixP->fx_file, fixP->fx_line,
24735 _("invalid offset, target not word aligned (0x%08lX)"),
24736 (((unsigned long) fixP->fx_frag->fr_address
24737 + (unsigned long) fixP->fx_where) & ~3)
24738 + (unsigned long) value);
24739
24740 if (value & ~0x3fc)
24741 as_bad_where (fixP->fx_file, fixP->fx_line,
24742 _("invalid offset, value too big (0x%08lX)"),
24743 (long) value);
24744
24745 newval |= value >> 2;
24746 break;
24747
24748 case 9: /* SP load/store. */
24749 if (value & ~0x3fc)
24750 as_bad_where (fixP->fx_file, fixP->fx_line,
24751 _("invalid offset, value too big (0x%08lX)"),
24752 (long) value);
24753 newval |= value >> 2;
24754 break;
24755
24756 case 6: /* Word load/store. */
24757 if (value & ~0x7c)
24758 as_bad_where (fixP->fx_file, fixP->fx_line,
24759 _("invalid offset, value too big (0x%08lX)"),
24760 (long) value);
24761 newval |= value << 4; /* 6 - 2. */
24762 break;
24763
24764 case 7: /* Byte load/store. */
24765 if (value & ~0x1f)
24766 as_bad_where (fixP->fx_file, fixP->fx_line,
24767 _("invalid offset, value too big (0x%08lX)"),
24768 (long) value);
24769 newval |= value << 6;
24770 break;
24771
24772 case 8: /* Halfword load/store. */
24773 if (value & ~0x3e)
24774 as_bad_where (fixP->fx_file, fixP->fx_line,
24775 _("invalid offset, value too big (0x%08lX)"),
24776 (long) value);
24777 newval |= value << 5; /* 6 - 1. */
24778 break;
24779
24780 default:
24781 as_bad_where (fixP->fx_file, fixP->fx_line,
24782 "Unable to process relocation for thumb opcode: %lx",
24783 (unsigned long) newval);
24784 break;
24785 }
24786 md_number_to_chars (buf, newval, THUMB_SIZE);
24787 break;
24788
24789 case BFD_RELOC_ARM_THUMB_ADD:
24790 /* This is a complicated relocation, since we use it for all of
24791 the following immediate relocations:
24792
24793 3bit ADD/SUB
24794 8bit ADD/SUB
24795 9bit ADD/SUB SP word-aligned
24796 10bit ADD PC/SP word-aligned
24797
24798 The type of instruction being processed is encoded in the
24799 instruction field:
24800
24801 0x8000 SUB
24802 0x00F0 Rd
24803 0x000F Rs
24804 */
24805 newval = md_chars_to_number (buf, THUMB_SIZE);
24806 {
24807 int rd = (newval >> 4) & 0xf;
24808 int rs = newval & 0xf;
24809 int subtract = !!(newval & 0x8000);
24810
24811 /* Check for HI regs, only very restricted cases allowed:
24812 Adjusting SP, and using PC or SP to get an address. */
24813 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24814 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24815 as_bad_where (fixP->fx_file, fixP->fx_line,
24816 _("invalid Hi register with immediate"));
24817
24818 /* If value is negative, choose the opposite instruction. */
24819 if (value < 0)
24820 {
24821 value = -value;
24822 subtract = !subtract;
24823 if (value < 0)
24824 as_bad_where (fixP->fx_file, fixP->fx_line,
24825 _("immediate value out of range"));
24826 }
24827
24828 if (rd == REG_SP)
24829 {
24830 if (value & ~0x1fc)
24831 as_bad_where (fixP->fx_file, fixP->fx_line,
24832 _("invalid immediate for stack address calculation"));
24833 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24834 newval |= value >> 2;
24835 }
24836 else if (rs == REG_PC || rs == REG_SP)
24837 {
24838 /* PR gas/18541. If the addition is for a defined symbol
24839 within range of an ADR instruction then accept it. */
24840 if (subtract
24841 && value == 4
24842 && fixP->fx_addsy != NULL)
24843 {
24844 subtract = 0;
24845
24846 if (! S_IS_DEFINED (fixP->fx_addsy)
24847 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24848 || S_IS_WEAK (fixP->fx_addsy))
24849 {
24850 as_bad_where (fixP->fx_file, fixP->fx_line,
24851 _("address calculation needs a strongly defined nearby symbol"));
24852 }
24853 else
24854 {
24855 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24856
24857 /* Round up to the next 4-byte boundary. */
24858 if (v & 3)
24859 v = (v + 3) & ~ 3;
24860 else
24861 v += 4;
24862 v = S_GET_VALUE (fixP->fx_addsy) - v;
24863
24864 if (v & ~0x3fc)
24865 {
24866 as_bad_where (fixP->fx_file, fixP->fx_line,
24867 _("symbol too far away"));
24868 }
24869 else
24870 {
24871 fixP->fx_done = 1;
24872 value = v;
24873 }
24874 }
24875 }
24876
24877 if (subtract || value & ~0x3fc)
24878 as_bad_where (fixP->fx_file, fixP->fx_line,
24879 _("invalid immediate for address calculation (value = 0x%08lX)"),
24880 (unsigned long) (subtract ? - value : value));
24881 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24882 newval |= rd << 8;
24883 newval |= value >> 2;
24884 }
24885 else if (rs == rd)
24886 {
24887 if (value & ~0xff)
24888 as_bad_where (fixP->fx_file, fixP->fx_line,
24889 _("immediate value out of range"));
24890 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24891 newval |= (rd << 8) | value;
24892 }
24893 else
24894 {
24895 if (value & ~0x7)
24896 as_bad_where (fixP->fx_file, fixP->fx_line,
24897 _("immediate value out of range"));
24898 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24899 newval |= rd | (rs << 3) | (value << 6);
24900 }
24901 }
24902 md_number_to_chars (buf, newval, THUMB_SIZE);
24903 break;
24904
24905 case BFD_RELOC_ARM_THUMB_IMM:
24906 newval = md_chars_to_number (buf, THUMB_SIZE);
24907 if (value < 0 || value > 255)
24908 as_bad_where (fixP->fx_file, fixP->fx_line,
24909 _("invalid immediate: %ld is out of range"),
24910 (long) value);
24911 newval |= value;
24912 md_number_to_chars (buf, newval, THUMB_SIZE);
24913 break;
24914
24915 case BFD_RELOC_ARM_THUMB_SHIFT:
24916 /* 5bit shift value (0..32). LSL cannot take 32. */
24917 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24918 temp = newval & 0xf800;
24919 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24920 as_bad_where (fixP->fx_file, fixP->fx_line,
24921 _("invalid shift value: %ld"), (long) value);
24922 /* Shifts of zero must be encoded as LSL. */
24923 if (value == 0)
24924 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24925 /* Shifts of 32 are encoded as zero. */
24926 else if (value == 32)
24927 value = 0;
24928 newval |= value << 6;
24929 md_number_to_chars (buf, newval, THUMB_SIZE);
24930 break;
24931
24932 case BFD_RELOC_VTABLE_INHERIT:
24933 case BFD_RELOC_VTABLE_ENTRY:
24934 fixP->fx_done = 0;
24935 return;
24936
24937 case BFD_RELOC_ARM_MOVW:
24938 case BFD_RELOC_ARM_MOVT:
24939 case BFD_RELOC_ARM_THUMB_MOVW:
24940 case BFD_RELOC_ARM_THUMB_MOVT:
24941 if (fixP->fx_done || !seg->use_rela_p)
24942 {
24943 /* REL format relocations are limited to a 16-bit addend. */
24944 if (!fixP->fx_done)
24945 {
24946 if (value < -0x8000 || value > 0x7fff)
24947 as_bad_where (fixP->fx_file, fixP->fx_line,
24948 _("offset out of range"));
24949 }
24950 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24951 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24952 {
24953 value >>= 16;
24954 }
24955
24956 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24957 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24958 {
24959 newval = get_thumb32_insn (buf);
24960 newval &= 0xfbf08f00;
24961 newval |= (value & 0xf000) << 4;
24962 newval |= (value & 0x0800) << 15;
24963 newval |= (value & 0x0700) << 4;
24964 newval |= (value & 0x00ff);
24965 put_thumb32_insn (buf, newval);
24966 }
24967 else
24968 {
24969 newval = md_chars_to_number (buf, 4);
24970 newval &= 0xfff0f000;
24971 newval |= value & 0x0fff;
24972 newval |= (value & 0xf000) << 4;
24973 md_number_to_chars (buf, newval, 4);
24974 }
24975 }
24976 return;
24977
24978 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24979 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24980 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24981 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24982 gas_assert (!fixP->fx_done);
24983 {
24984 bfd_vma insn;
24985 bfd_boolean is_mov;
24986 bfd_vma encoded_addend = value;
24987
24988 /* Check that addend can be encoded in instruction. */
24989 if (!seg->use_rela_p && (value < 0 || value > 255))
24990 as_bad_where (fixP->fx_file, fixP->fx_line,
24991 _("the offset 0x%08lX is not representable"),
24992 (unsigned long) encoded_addend);
24993
24994 /* Extract the instruction. */
24995 insn = md_chars_to_number (buf, THUMB_SIZE);
24996 is_mov = (insn & 0xf800) == 0x2000;
24997
24998 /* Encode insn. */
24999 if (is_mov)
25000 {
25001 if (!seg->use_rela_p)
25002 insn |= encoded_addend;
25003 }
25004 else
25005 {
25006 int rd, rs;
25007
25008 /* Extract the instruction. */
25009 /* Encoding is the following
25010 0x8000 SUB
25011 0x00F0 Rd
25012 0x000F Rs
25013 */
25014 /* The following conditions must be true :
25015 - ADD
25016 - Rd == Rs
25017 - Rd <= 7
25018 */
25019 rd = (insn >> 4) & 0xf;
25020 rs = insn & 0xf;
25021 if ((insn & 0x8000) || (rd != rs) || rd > 7)
25022 as_bad_where (fixP->fx_file, fixP->fx_line,
25023 _("Unable to process relocation for thumb opcode: %lx"),
25024 (unsigned long) insn);
25025
25026 /* Encode as ADD immediate8 thumb 1 code. */
25027 insn = 0x3000 | (rd << 8);
25028
25029 /* Place the encoded addend into the first 8 bits of the
25030 instruction. */
25031 if (!seg->use_rela_p)
25032 insn |= encoded_addend;
25033 }
25034
25035 /* Update the instruction. */
25036 md_number_to_chars (buf, insn, THUMB_SIZE);
25037 }
25038 break;
25039
25040 case BFD_RELOC_ARM_ALU_PC_G0_NC:
25041 case BFD_RELOC_ARM_ALU_PC_G0:
25042 case BFD_RELOC_ARM_ALU_PC_G1_NC:
25043 case BFD_RELOC_ARM_ALU_PC_G1:
25044 case BFD_RELOC_ARM_ALU_PC_G2:
25045 case BFD_RELOC_ARM_ALU_SB_G0_NC:
25046 case BFD_RELOC_ARM_ALU_SB_G0:
25047 case BFD_RELOC_ARM_ALU_SB_G1_NC:
25048 case BFD_RELOC_ARM_ALU_SB_G1:
25049 case BFD_RELOC_ARM_ALU_SB_G2:
25050 gas_assert (!fixP->fx_done);
25051 if (!seg->use_rela_p)
25052 {
25053 bfd_vma insn;
25054 bfd_vma encoded_addend;
25055 bfd_vma addend_abs = llabs (value);
25056
25057 /* Check that the absolute value of the addend can be
25058 expressed as an 8-bit constant plus a rotation. */
25059 encoded_addend = encode_arm_immediate (addend_abs);
25060 if (encoded_addend == (unsigned int) FAIL)
25061 as_bad_where (fixP->fx_file, fixP->fx_line,
25062 _("the offset 0x%08lX is not representable"),
25063 (unsigned long) addend_abs);
25064
25065 /* Extract the instruction. */
25066 insn = md_chars_to_number (buf, INSN_SIZE);
25067
25068 /* If the addend is positive, use an ADD instruction.
25069 Otherwise use a SUB. Take care not to destroy the S bit. */
25070 insn &= 0xff1fffff;
25071 if (value < 0)
25072 insn |= 1 << 22;
25073 else
25074 insn |= 1 << 23;
25075
25076 /* Place the encoded addend into the first 12 bits of the
25077 instruction. */
25078 insn &= 0xfffff000;
25079 insn |= encoded_addend;
25080
25081 /* Update the instruction. */
25082 md_number_to_chars (buf, insn, INSN_SIZE);
25083 }
25084 break;
25085
25086 case BFD_RELOC_ARM_LDR_PC_G0:
25087 case BFD_RELOC_ARM_LDR_PC_G1:
25088 case BFD_RELOC_ARM_LDR_PC_G2:
25089 case BFD_RELOC_ARM_LDR_SB_G0:
25090 case BFD_RELOC_ARM_LDR_SB_G1:
25091 case BFD_RELOC_ARM_LDR_SB_G2:
25092 gas_assert (!fixP->fx_done);
25093 if (!seg->use_rela_p)
25094 {
25095 bfd_vma insn;
25096 bfd_vma addend_abs = llabs (value);
25097
25098 /* Check that the absolute value of the addend can be
25099 encoded in 12 bits. */
25100 if (addend_abs >= 0x1000)
25101 as_bad_where (fixP->fx_file, fixP->fx_line,
25102 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25103 (unsigned long) addend_abs);
25104
25105 /* Extract the instruction. */
25106 insn = md_chars_to_number (buf, INSN_SIZE);
25107
25108 /* If the addend is negative, clear bit 23 of the instruction.
25109 Otherwise set it. */
25110 if (value < 0)
25111 insn &= ~(1 << 23);
25112 else
25113 insn |= 1 << 23;
25114
25115 /* Place the absolute value of the addend into the first 12 bits
25116 of the instruction. */
25117 insn &= 0xfffff000;
25118 insn |= addend_abs;
25119
25120 /* Update the instruction. */
25121 md_number_to_chars (buf, insn, INSN_SIZE);
25122 }
25123 break;
25124
25125 case BFD_RELOC_ARM_LDRS_PC_G0:
25126 case BFD_RELOC_ARM_LDRS_PC_G1:
25127 case BFD_RELOC_ARM_LDRS_PC_G2:
25128 case BFD_RELOC_ARM_LDRS_SB_G0:
25129 case BFD_RELOC_ARM_LDRS_SB_G1:
25130 case BFD_RELOC_ARM_LDRS_SB_G2:
25131 gas_assert (!fixP->fx_done);
25132 if (!seg->use_rela_p)
25133 {
25134 bfd_vma insn;
25135 bfd_vma addend_abs = llabs (value);
25136
25137 /* Check that the absolute value of the addend can be
25138 encoded in 8 bits. */
25139 if (addend_abs >= 0x100)
25140 as_bad_where (fixP->fx_file, fixP->fx_line,
25141 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25142 (unsigned long) addend_abs);
25143
25144 /* Extract the instruction. */
25145 insn = md_chars_to_number (buf, INSN_SIZE);
25146
25147 /* If the addend is negative, clear bit 23 of the instruction.
25148 Otherwise set it. */
25149 if (value < 0)
25150 insn &= ~(1 << 23);
25151 else
25152 insn |= 1 << 23;
25153
25154 /* Place the first four bits of the absolute value of the addend
25155 into the first 4 bits of the instruction, and the remaining
25156 four into bits 8 .. 11. */
25157 insn &= 0xfffff0f0;
25158 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
25159
25160 /* Update the instruction. */
25161 md_number_to_chars (buf, insn, INSN_SIZE);
25162 }
25163 break;
25164
25165 case BFD_RELOC_ARM_LDC_PC_G0:
25166 case BFD_RELOC_ARM_LDC_PC_G1:
25167 case BFD_RELOC_ARM_LDC_PC_G2:
25168 case BFD_RELOC_ARM_LDC_SB_G0:
25169 case BFD_RELOC_ARM_LDC_SB_G1:
25170 case BFD_RELOC_ARM_LDC_SB_G2:
25171 gas_assert (!fixP->fx_done);
25172 if (!seg->use_rela_p)
25173 {
25174 bfd_vma insn;
25175 bfd_vma addend_abs = llabs (value);
25176
25177 /* Check that the absolute value of the addend is a multiple of
25178 four and, when divided by four, fits in 8 bits. */
25179 if (addend_abs & 0x3)
25180 as_bad_where (fixP->fx_file, fixP->fx_line,
25181 _("bad offset 0x%08lX (must be word-aligned)"),
25182 (unsigned long) addend_abs);
25183
25184 if ((addend_abs >> 2) > 0xff)
25185 as_bad_where (fixP->fx_file, fixP->fx_line,
25186 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25187 (unsigned long) addend_abs);
25188
25189 /* Extract the instruction. */
25190 insn = md_chars_to_number (buf, INSN_SIZE);
25191
25192 /* If the addend is negative, clear bit 23 of the instruction.
25193 Otherwise set it. */
25194 if (value < 0)
25195 insn &= ~(1 << 23);
25196 else
25197 insn |= 1 << 23;
25198
25199 /* Place the addend (divided by four) into the first eight
25200 bits of the instruction. */
25201 insn &= 0xfffffff0;
25202 insn |= addend_abs >> 2;
25203
25204 /* Update the instruction. */
25205 md_number_to_chars (buf, insn, INSN_SIZE);
25206 }
25207 break;
25208
25209 case BFD_RELOC_THUMB_PCREL_BRANCH5:
25210 if (fixP->fx_addsy
25211 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25212 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25213 && ARM_IS_FUNC (fixP->fx_addsy)
25214 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25215 {
25216 /* Force a relocation for a branch 5 bits wide. */
25217 fixP->fx_done = 0;
25218 }
25219 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
25220 as_bad_where (fixP->fx_file, fixP->fx_line,
25221 BAD_BRANCH_OFF);
25222
25223 if (fixP->fx_done || !seg->use_rela_p)
25224 {
25225 addressT boff = value >> 1;
25226
25227 newval = md_chars_to_number (buf, THUMB_SIZE);
25228 newval |= (boff << 7);
25229 md_number_to_chars (buf, newval, THUMB_SIZE);
25230 }
25231 break;
25232
25233 case BFD_RELOC_THUMB_PCREL_BFCSEL:
25234 if (fixP->fx_addsy
25235 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25236 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25237 && ARM_IS_FUNC (fixP->fx_addsy)
25238 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25239 {
25240 fixP->fx_done = 0;
25241 }
25242 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
25243 as_bad_where (fixP->fx_file, fixP->fx_line,
25244 _("branch out of range"));
25245
25246 if (fixP->fx_done || !seg->use_rela_p)
25247 {
25248 newval = md_chars_to_number (buf, THUMB_SIZE);
25249
25250 addressT boff = ((newval & 0x0780) >> 7) << 1;
25251 addressT diff = value - boff;
25252
25253 if (diff == 4)
25254 {
25255 newval |= 1 << 1; /* T bit. */
25256 }
25257 else if (diff != 2)
25258 {
25259 as_bad_where (fixP->fx_file, fixP->fx_line,
25260 _("out of range label-relative fixup value"));
25261 }
25262 md_number_to_chars (buf, newval, THUMB_SIZE);
25263 }
25264 break;
25265
25266 case BFD_RELOC_ARM_THUMB_BF17:
25267 if (fixP->fx_addsy
25268 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25269 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25270 && ARM_IS_FUNC (fixP->fx_addsy)
25271 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25272 {
25273 /* Force a relocation for a branch 17 bits wide. */
25274 fixP->fx_done = 0;
25275 }
25276
25277 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
25278 as_bad_where (fixP->fx_file, fixP->fx_line,
25279 BAD_BRANCH_OFF);
25280
25281 if (fixP->fx_done || !seg->use_rela_p)
25282 {
25283 offsetT newval2;
25284 addressT immA, immB, immC;
25285
25286 immA = (value & 0x0001f000) >> 12;
25287 immB = (value & 0x00000ffc) >> 2;
25288 immC = (value & 0x00000002) >> 1;
25289
25290 newval = md_chars_to_number (buf, THUMB_SIZE);
25291 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25292 newval |= immA;
25293 newval2 |= (immC << 11) | (immB << 1);
25294 md_number_to_chars (buf, newval, THUMB_SIZE);
25295 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25296 }
25297 break;
25298
25299 case BFD_RELOC_ARM_THUMB_BF19:
25300 if (fixP->fx_addsy
25301 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25302 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25303 && ARM_IS_FUNC (fixP->fx_addsy)
25304 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25305 {
25306 /* Force a relocation for a branch 19 bits wide. */
25307 fixP->fx_done = 0;
25308 }
25309
25310 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
25311 as_bad_where (fixP->fx_file, fixP->fx_line,
25312 BAD_BRANCH_OFF);
25313
25314 if (fixP->fx_done || !seg->use_rela_p)
25315 {
25316 offsetT newval2;
25317 addressT immA, immB, immC;
25318
25319 immA = (value & 0x0007f000) >> 12;
25320 immB = (value & 0x00000ffc) >> 2;
25321 immC = (value & 0x00000002) >> 1;
25322
25323 newval = md_chars_to_number (buf, THUMB_SIZE);
25324 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25325 newval |= immA;
25326 newval2 |= (immC << 11) | (immB << 1);
25327 md_number_to_chars (buf, newval, THUMB_SIZE);
25328 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25329 }
25330 break;
25331
25332 case BFD_RELOC_ARM_THUMB_BF13:
25333 if (fixP->fx_addsy
25334 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25335 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25336 && ARM_IS_FUNC (fixP->fx_addsy)
25337 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25338 {
25339 /* Force a relocation for a branch 13 bits wide. */
25340 fixP->fx_done = 0;
25341 }
25342
25343 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
25344 as_bad_where (fixP->fx_file, fixP->fx_line,
25345 BAD_BRANCH_OFF);
25346
25347 if (fixP->fx_done || !seg->use_rela_p)
25348 {
25349 offsetT newval2;
25350 addressT immA, immB, immC;
25351
25352 immA = (value & 0x00001000) >> 12;
25353 immB = (value & 0x00000ffc) >> 2;
25354 immC = (value & 0x00000002) >> 1;
25355
25356 newval = md_chars_to_number (buf, THUMB_SIZE);
25357 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25358 newval |= immA;
25359 newval2 |= (immC << 11) | (immB << 1);
25360 md_number_to_chars (buf, newval, THUMB_SIZE);
25361 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25362 }
25363 break;
25364
25365 case BFD_RELOC_ARM_THUMB_LOOP12:
25366 if (fixP->fx_addsy
25367 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25368 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25369 && ARM_IS_FUNC (fixP->fx_addsy)
25370 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25371 {
25372 /* Force a relocation for a branch 12 bits wide. */
25373 fixP->fx_done = 0;
25374 }
25375
25376 bfd_vma insn = get_thumb32_insn (buf);
25377 /* le lr, <label> or le <label> */
25378 if (((insn & 0xffffffff) == 0xf00fc001)
25379 || ((insn & 0xffffffff) == 0xf02fc001))
25380 value = -value;
25381
25382 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
25383 as_bad_where (fixP->fx_file, fixP->fx_line,
25384 BAD_BRANCH_OFF);
25385 if (fixP->fx_done || !seg->use_rela_p)
25386 {
25387 addressT imml, immh;
25388
25389 immh = (value & 0x00000ffc) >> 2;
25390 imml = (value & 0x00000002) >> 1;
25391
25392 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25393 newval |= (imml << 11) | (immh << 1);
25394 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
25395 }
25396 break;
25397
25398 case BFD_RELOC_ARM_V4BX:
25399 /* This will need to go in the object file. */
25400 fixP->fx_done = 0;
25401 break;
25402
25403 case BFD_RELOC_UNUSED:
25404 default:
25405 as_bad_where (fixP->fx_file, fixP->fx_line,
25406 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
25407 }
25408 }
25409
25410 /* Translate internal representation of relocation info to BFD target
25411 format. */
25412
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For REL targets the addend lives in the section contents, so the
	 reloc's address stands in for the offset; RELA targets carry the
	 PC-relative adjustment in the addend itself.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code that will be
     emitted into the object file.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types pass through to the object file unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI version 4 onwards BLX fixups are expressed as plain
	 BRANCH23 relocations; the linker handles any mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These narrow-range branches must be resolved at assembly time;
	 there is no object-file relocation for them.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining type is internal-only and cannot be represented
	   in the object file; report it by name.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
25696
25697 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25698
25699 void
25700 cons_fix_new_arm (fragS * frag,
25701 int where,
25702 int size,
25703 expressionS * exp,
25704 bfd_reloc_code_real_type reloc)
25705 {
25706 int pcrel = 0;
25707
25708 /* Pick a reloc.
25709 FIXME: @@ Should look at CPU word size. */
25710 switch (size)
25711 {
25712 case 1:
25713 reloc = BFD_RELOC_8;
25714 break;
25715 case 2:
25716 reloc = BFD_RELOC_16;
25717 break;
25718 case 4:
25719 default:
25720 reloc = BFD_RELOC_32;
25721 break;
25722 case 8:
25723 reloc = BFD_RELOC_64;
25724 break;
25725 }
25726
25727 #ifdef TE_PE
25728 if (exp->X_op == O_secrel)
25729 {
25730 exp->X_op = O_symbol;
25731 reloc = BFD_RELOC_32_SECREL;
25732 }
25733 #endif
25734
25735 fix_new_exp (frag, where, size, exp, pcrel, reloc);
25736 }
25737
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23)
    return;

  if (fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
25755
25756
25757 int
25758 arm_force_relocation (struct fix * fixp)
25759 {
25760 #if defined (OBJ_COFF) && defined (TE_PE)
25761 if (fixp->fx_r_type == BFD_RELOC_RVA)
25762 return 1;
25763 #endif
25764
25765 /* In case we have a call or a branch to a function in ARM ISA mode from
25766 a thumb function or vice-versa force the relocation. These relocations
25767 are cleared off for some cores that might have blx and simple transformations
25768 are possible. */
25769
25770 #ifdef OBJ_ELF
25771 switch (fixp->fx_r_type)
25772 {
25773 case BFD_RELOC_ARM_PCREL_JUMP:
25774 case BFD_RELOC_ARM_PCREL_CALL:
25775 case BFD_RELOC_THUMB_PCREL_BLX:
25776 if (THUMB_IS_FUNC (fixp->fx_addsy))
25777 return 1;
25778 break;
25779
25780 case BFD_RELOC_ARM_PCREL_BLX:
25781 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25782 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25783 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25784 if (ARM_IS_FUNC (fixp->fx_addsy))
25785 return 1;
25786 break;
25787
25788 default:
25789 break;
25790 }
25791 #endif
25792
25793 /* Resolve these relocations even if the symbol is extern or weak.
25794 Technically this is probably wrong due to symbol preemption.
25795 In practice these relocations do not have enough range to be useful
25796 at dynamic link time, and some code (e.g. in the Linux kernel)
25797 expects these references to be resolved. */
25798 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25799 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25800 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25801 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25802 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25803 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25804 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25805 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25806 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25807 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25808 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25809 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25810 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25811 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25812 return 0;
25813
25814 /* Always leave these relocations for the linker. */
25815 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25816 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25817 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25818 return 1;
25819
25820 /* Always generate relocations against function symbols. */
25821 if (fixp->fx_r_type == BFD_RELOC_32
25822 && fixp->fx_addsy
25823 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25824 return 1;
25825
25826 return generic_force_reloc (fixp);
25827 }
25828
25829 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25830 /* Relocations against function names must be left unadjusted,
25831 so that the linker can use this information to generate interworking
25832 stubs. The MIPS version of this function
25833 also prevents relocations that are mips-16 specific, but I do not
25834 know why it does this.
25835
25836 FIXME:
25837 There is one other problem that ought to be addressed here, but
25838 which currently is not: Taking the address of a label (rather
25839 than a function) and then later jumping to that address. Such
25840 addresses also ought to have their bottom bit set (assuming that
25841 they reside in Thumb code), but at the moment they will not. */
25842
25843 bfd_boolean
25844 arm_fix_adjustable (fixS * fixP)
25845 {
25846 if (fixP->fx_addsy == NULL)
25847 return 1;
25848
25849 /* Preserve relocations against symbols with function type. */
25850 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25851 return FALSE;
25852
25853 if (THUMB_IS_FUNC (fixP->fx_addsy)
25854 && fixP->fx_subsy == NULL)
25855 return FALSE;
25856
25857 /* We need the symbol name for the VTABLE entries. */
25858 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25859 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25860 return FALSE;
25861
25862 /* Don't allow symbols to be discarded on GOT related relocs. */
25863 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25864 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25865 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25866 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25867 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
25868 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25869 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25870 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
25871 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25872 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
25873 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25874 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25875 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25876 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25877 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25878 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25879 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25880 return FALSE;
25881
25882 /* Similarly for group relocations. */
25883 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25884 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25885 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25886 return FALSE;
25887
25888 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25889 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25890 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25891 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25892 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25893 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25894 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25895 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25896 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25897 return FALSE;
25898
25899 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25900 offsets, so keep these symbols. */
25901 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25902 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25903 return FALSE;
25904
25905 return TRUE;
25906 }
25907 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25908
25909 #ifdef OBJ_ELF
25910 const char *
25911 elf32_arm_target_format (void)
25912 {
25913 #ifdef TE_SYMBIAN
25914 return (target_big_endian
25915 ? "elf32-bigarm-symbian"
25916 : "elf32-littlearm-symbian");
25917 #elif defined (TE_VXWORKS)
25918 return (target_big_endian
25919 ? "elf32-bigarm-vxworks"
25920 : "elf32-littlearm-vxworks");
25921 #elif defined (TE_NACL)
25922 return (target_big_endian
25923 ? "elf32-bigarm-nacl"
25924 : "elf32-littlearm-nacl");
25925 #else
25926 if (arm_fdpic)
25927 {
25928 if (target_big_endian)
25929 return "elf32-bigarm-fdpic";
25930 else
25931 return "elf32-littlearm-fdpic";
25932 }
25933 else
25934 {
25935 if (target_big_endian)
25936 return "elf32-bigarm";
25937 else
25938 return "elf32-littlearm";
25939 }
25940 #endif
25941 }
25942
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  /* No ARM-specific symbol frobbing is needed; defer entirely to the
     generic ELF hook.  */
  elf_frob_symbol (symp, puntp);
}
25949 #endif
25950
25951 /* MD interface: Finalization. */
25952
25953 void
25954 arm_cleanup (void)
25955 {
25956 literal_pool * pool;
25957
25958 /* Ensure that all the IT blocks are properly closed. */
25959 check_it_blocks_finished ();
25960
25961 for (pool = list_of_pools; pool; pool = pool->next)
25962 {
25963 /* Put it at the end of the relevant section. */
25964 subseg_set (pool->section, pool->sub_section);
25965 #ifdef OBJ_ELF
25966 arm_elf_change_section ();
25967 #endif
25968 s_ltorg (0);
25969 }
25970 }
25971
25972 #ifdef OBJ_ELF
25973 /* Remove any excess mapping symbols generated for alignment frags in
25974 SEC. We may have created a mapping symbol before a zero byte
25975 alignment; remove it if there's a mapping symbol after the
25976 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be redundant, and only
	 when it sits exactly at the frag's end.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward over empty frags to decide whether SYM is shadowed
	 by a later mapping symbol at the same address.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
26037 #endif
26038
26039 /* Adjust the symbol table. This marks Thumb symbols as distinct from
26040 ARM ones. */
26041
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF, encode the Thumb-ness of a symbol in its storage class.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking-capable symbols are flagged in the native COFF entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* For ELF, tag Thumb symbols via st_target_internal or STT_ARM_16BIT.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
26120
26121 /* MD interface: Initialization. */
26122
26123 static void
26124 set_constant_flonums (void)
26125 {
26126 int i;
26127
26128 for (i = 0; i < NUM_FLOAT_VALS; i++)
26129 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
26130 abort ();
26131 }
26132
26133 /* Auto-select Thumb mode if it's the only available instruction set for the
26134 given architecture. */
26135
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the target has no 32-bit ARM instruction set at all (no v1
     feature bit, e.g. M-profile cores), default to Thumb encoding.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
26142
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Build the hash tables used during instruction parsing.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      /* No FPU chosen anywhere: fall back to the build-time default if a
	 CPU was named, otherwise to the most permissive FPA set.  */
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Auto-detection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.  It will be set in
     aeabi_set_public_attributes () after all instructions have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Checks run from most to least specific
     feature so the richest matching machine number wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
26379
26380 /* Command line processing. */
26381
26382 /* md_parse_option
26383 Invocation line includes a switch not recognized by the base assembler.
26384 See if it's a processor-specific option.
26385
26386 This routine is somewhat complicated by the need for backwards
26387 compatibility (since older releases of gcc can't be changed).
26388 The new options try to make the interface as compatible as
26389 possible with GCC.
26390
26391 New options (supported) are:
26392
26393 -mcpu=<cpu name> Assemble for selected processor
26394 -march=<architecture name> Assemble for selected architecture
26395 -mfpu=<fpu architecture> Assemble for selected FPU.
26396 -EB/-mbig-endian Big-endian
26397 -EL/-mlittle-endian Little-endian
26398 -k Generate PIC code
26399 -mthumb Start in Thumb mode
26400 -mthumb-interwork Code supports ARM/Thumb interworking
26401
26402 -m[no-]warn-deprecated Warn about deprecated features
26403 -m[no-]warn-syms Warn when symbols match instructions
26404
26405 For now we will also provide support for:
26406
26407 -mapcs-32 32-bit Program counter
26408 -mapcs-26 26-bit Program counter
   -mapcs-float		 Floats passed in FP registers
26410 -mapcs-reentrant Reentrant code
26411 -matpcs
26412 (sometime these will probably be replaced with -mapcs=<list of options>
26413 and -matpcs=<list of options>)
26414
   The remaining options are only supported for backwards compatibility.
26416 Cpu variants, the arm part is optional:
26417 -m[arm]1 Currently not supported.
26418 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26419 -m[arm]3 Arm 3 processor
26420 -m[arm]6[xx], Arm 6 processors
26421 -m[arm]7[xx][t][[d]m] Arm 7 processors
26422 -m[arm]8[10] Arm 8 processors
26423 -m[arm]9[20][tdmi] Arm 9 processors
26424 -mstrongarm[110[0]] StrongARM processors
26425 -mxscale XScale processors
26426 -m[arm]v[2345[t[e]]] Arm architectures
26427 -mall All (except the ARM1)
26428 FP variants:
26429 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26430 -mfpe-old (No float load/store multiples)
26431 -mvfpxd VFP Single precision
26432 -mvfp All VFP
26433 -mno-fpu Disable all floating point instructions
26434
26435 The following CPU names are recognized:
26436 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26437 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26438 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26439 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26440 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26441 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26442 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
26443
26444 */
26445
/* Single-letter command line options: -m<arg> (takes an argument) and -k.  */
const char * md_shortopts = "m:k";
26447
/* getopt return codes for the target-specific long options.  Only the
   endianness selector(s) supported by this build are defined.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
26460
/* Long-form command line options handled by md_parse_option.  -EB/-EL
   are conditional on the endianness macros above; -fdpic is ELF-only.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
26477
/* A simple boolean/integer command line option: when OPTION is seen,
   *VAR is set to VALUE.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26486
26487 struct arm_option_table arm_opts[] =
26488 {
26489 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
26490 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
26491 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26492 &support_interwork, 1, NULL},
26493 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
26494 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
26495 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
26496 1, NULL},
26497 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
26498 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
26499 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
26500 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
26501 NULL},
26502
26503 /* These are recognized by the assembler, but have no affect on code. */
26504 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
26505 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
26506
26507 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
26508 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26509 &warn_on_deprecated, 0, NULL},
26510 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
26511 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
26512 {NULL, NULL, NULL, 0, NULL}
26513 };
26514
/* A deprecated CPU/FPU selection option: *VAR is pointed at VALUE, and
   DEPRECATED names the modern replacement to suggest to the user.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26522
/* Deprecated -m<cpu>/-m<arch>/-m<fpu> spellings, each mapped onto a
   feature set together with the message naming its replacement.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
26635
/* An entry in the CPU selection table (-mcpu=).  */
struct arm_cpu_option_table
{
  const char * name;		/* CPU name accepted on the command line.  */
  size_t name_len;		/* strlen (NAME), precomputed via sizeof.  */
  const arm_feature_set	value;	/* Architectural features of the CPU.  */
  const arm_feature_set	ext;	/* Extension bits the CPU implies (e.g. SEC,
				   MP, CRC) on top of VALUE.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
26649
26650 /* This list should, at a minimum, contain all the cpu names
26651 recognized by GCC. */
26652 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
26653
static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Pre-v5 cores default to the FPA co-processor.  */
  ARM_CPU_OPT ("all",		  NULL,		   ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		  NULL,		   ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		  NULL,		   ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	  NULL,		   ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		  NULL,		   ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		  NULL,		   ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	  NULL,		   ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	  NULL,		   ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	  NULL,		   ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	  "ARM920T",	   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	  NULL,		   ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		  NULL,		   ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	  NULL,		   ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	   ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	   ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	  NULL,		   ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	  NULL,		   ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	  NULL,		   ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	  NULL,		   ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	  NULL,		   ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	  NULL,		   ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",   ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		   ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	  NULL,		   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	   ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	  NULL,		   ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",   ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		   ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	  "MPCore",	   ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	   ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		   ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		   ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		   ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		   ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Armv7 and later Cortex-A/R/M and partner cores; the EXT column
     carries the extension bits each core implies.  */
  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	   ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	   ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	   ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	   ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",	  "Cortex-A55",	   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	   ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",	  "Cortex-A75",	   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",	  "Cortex-A76",	   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",		  "Ares",	   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	   ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	   ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	   ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	   ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	   ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	   ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	   ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	   ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	   ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	   ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	   ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",	  "Neoverse N1",   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	  NULL,		   ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	  NULL,		   ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	  NULL,		   ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	  NULL,		   ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312",	  "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	  NULL,		   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,		   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",  ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Terminating entry.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
27043 #undef ARM_CPU_OPT
27044
/* An architecture extension entry: MERGE holds the feature bits added
   by "+NAME", CLEAR the bits removed by "+noNAME".  */
struct arm_ext_table
{
  const char * name;			/* Extension name after the '+'.  */
  size_t name_len;			/* strlen (NAME), precomputed.  */
  const arm_feature_set merge;		/* Bits enabled by +NAME.  */
  const arm_feature_set clear;		/* Bits disabled by +noNAME.  */
};
27052
/* An entry in the architecture selection table (-march=).  */
struct arm_arch_option_table
{
  const char * name;			/* Architecture name.  */
  size_t name_len;			/* strlen (NAME), precomputed.  */
  const arm_feature_set	value;		/* Features the architecture provides.	*/
  const arm_feature_set	default_fpu;	/* FPU assumed when none is given.  */
  const struct arm_ext_table * ext_table;  /* Table of supported +extensions.  */
};
27061
/* Helpers for building arm_ext_table entries.  NAME_LEN is computed at
   compile time from the string literal.  Spacing of the sizeof
   expressions normalized to GNU style for consistency.  */

/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* All floating-point feature bits except pure-endianness selection.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27071
/* Extensions accepted after -march=armv5te (and related arches).  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27077
/* Extensions accepted after -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27083
/* Extensions accepted after -march=armv7ve.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27106
/* Extensions accepted after -march=armv7-a.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27131
/* Extensions accepted after -march=armv7-r.  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.	*/
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27144
/* Extensions accepted after -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27155
/* Extensions accepted after -march=armv8-a.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27170
27171
/* Extensions accepted after -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a and later do not allow an FP implementation without SIMD, so
     the user should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27185
/* Architecture extensions selectable after -march=armv8.2-a (also used
   for armv8.3-a, see arm_archs below).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27202
/* Architecture extensions selectable after -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27217
/* Architecture extensions selectable after -march=armv8.5-a.  No +sb or
   +predres entries here: those features are part of the Armv8.5-A base
   architecture.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27230
/* Architecture extensions selectable after -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27239
/* Architecture extensions selectable after -march=armv8.1-m.main.  The
   FP entries also enable the scalar FP16 instructions (ARM_EXT2_FP16_INST)
   alongside the VFPv5 feature bits.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27253
/* Architecture extensions selectable after -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27264
27265 /* This list should, at a minimum, contain all the architecture names
27266 recognized by GCC. */
27267 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
27268 #define ARM_ARCH_OPT2(N, V, DF, ext) \
27269 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27270
27271 static const struct arm_arch_option_table arm_archs[] =
27272 {
27273 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
27274 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
27275 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
27276 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
27277 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
27278 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
27279 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
27280 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
27281 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
27282 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
27283 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
27284 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
27285 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
27286 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
27287 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
27288 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
27289 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
27290 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27291 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27292 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
27293 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
27294 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27295 kept to preserve existing behaviour. */
27296 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27297 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27298 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
27299 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
27300 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
27301 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27302 kept to preserve existing behaviour. */
27303 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27304 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27305 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
27306 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
27307 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
27308 /* The official spelling of the ARMv7 profile variants is the dashed form.
27309 Accept the non-dashed form for compatibility with old toolchains. */
27310 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27311 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
27312 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27313 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27314 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27315 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27316 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27317 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
27318 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
27319 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
27320 armv8m_main),
27321 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
27322 armv8_1m_main),
27323 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
27324 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
27325 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
27326 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
27327 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
27328 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
27329 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
27330 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
27331 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
27332 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
27333 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
27334 };
27335 #undef ARM_ARCH_OPT
27336
27337 /* ISA extensions in the co-processor and main instruction set space. */
27338
/* One legacy command-line architectural extension (+name / +noname).  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, without the '+'.  */
  size_t name_len;			/* strlen (name), precomputed.  */
  const arm_feature_set merge_value;	/* Features added by +name.  */
  const arm_feature_set clear_value;	/* Features removed by +noname.  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
27350
27351 /* The following table must be in alphabetical order with a NULL last entry. */
27352
27353 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
27354 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
27355
27356 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27357 use the context sensitive approach using arm_ext_table's. */
27358 static const struct arm_option_extension_value_table arm_extensions[] =
27359 {
27360 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27361 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27362 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
27363 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
27364 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27365 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
27366 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
27367 ARM_ARCH_V8_2A),
27368 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
27369 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
27370 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
27371 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
27372 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27373 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27374 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27375 ARM_ARCH_V8_2A),
27376 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27377 | ARM_EXT2_FP16_FML),
27378 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27379 | ARM_EXT2_FP16_FML),
27380 ARM_ARCH_V8_2A),
27381 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
27382 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
27383 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
27384 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
27385 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27386 Thumb divide instruction. Due to this having the same name as the
27387 previous entry, this will be ignored when doing command-line parsing and
27388 only considered by build attribute selection code. */
27389 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
27390 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
27391 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
27392 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
27393 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
27394 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
27395 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
27396 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
27397 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
27398 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
27399 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
27400 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
27401 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
27402 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
27403 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
27404 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
27405 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
27406 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
27407 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27408 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
27409 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
27410 ARM_ARCH_V8A),
27411 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
27412 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
27413 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27414 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
27415 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
27416 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27417 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
27418 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
27419 ARM_ARCH_V8A),
27420 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
27421 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
27422 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
27423 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
27424 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
27425 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
27426 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27427 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
27428 | ARM_EXT_DIV),
27429 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
27430 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
27431 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
27432 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
27433 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
27434 };
27435 #undef ARM_EXT_OPT
27436
27437 /* ISA floating-point and Advanced SIMD extensions. */
/* ISA floating-point and Advanced SIMD extensions.  Maps an -mfpu= name
   to the feature set it enables.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* -mfpu= argument.  */
  const arm_feature_set value;	/* Features implied by that FPU.  */
};
27443
27444 /* This list should, at a minimum, contain all the fpu names
27445 recognized by GCC. */
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Several entries are historical CPU/FPU names kept
   for backwards compatibility; matched by exact string (see
   arm_parse_fpu).  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"vfpv2", FPU_ARCH_VFP_V2},
  {"vfpv3", FPU_ARCH_VFP_V3},
  {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd", FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16", FPU_ARCH_NEON_FP16},
  {"vfpv4", FPU_ARCH_VFP_V4},
  {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16", FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8", FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL, ARM_ARCH_NONE}
};
27494
/* Generic name -> integer value mapping used for -mfloat-abi= and
   -meabi= argument tables.  */
struct arm_option_value_table
{
  const char *name;	/* Option argument.  */
  long value;		/* Value stored when the argument matches.  */
};
27500
/* Accepted arguments for -mfloat-abi= (see arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
27508
27509 #ifdef OBJ_ELF
27510 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
27518 #endif
27519
/* A multi-character option such as -mcpu=; matched by prefix in
   md_parse_option, with the remainder handed to FUNC.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
27527
27528 static bfd_boolean
27529 arm_parse_extension (const char *str, const arm_feature_set *opt_set,
27530 arm_feature_set *ext_set,
27531 const struct arm_ext_table *ext_table)
27532 {
27533 /* We insist on extensions being specified in alphabetical order, and with
27534 extensions being added before being removed. We achieve this by having
27535 the global ARM_EXTENSIONS table in alphabetical order, and using the
27536 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27537 or removing it (0) and only allowing it to change in the order
27538 -1 -> 1 -> 0. */
27539 const struct arm_option_extension_value_table * opt = NULL;
27540 const arm_feature_set arm_any = ARM_ANY;
27541 int adding_value = -1;
27542
27543 while (str != NULL && *str != 0)
27544 {
27545 const char *ext;
27546 size_t len;
27547
27548 if (*str != '+')
27549 {
27550 as_bad (_("invalid architectural extension"));
27551 return FALSE;
27552 }
27553
27554 str++;
27555 ext = strchr (str, '+');
27556
27557 if (ext != NULL)
27558 len = ext - str;
27559 else
27560 len = strlen (str);
27561
27562 if (len >= 2 && strncmp (str, "no", 2) == 0)
27563 {
27564 if (adding_value != 0)
27565 {
27566 adding_value = 0;
27567 opt = arm_extensions;
27568 }
27569
27570 len -= 2;
27571 str += 2;
27572 }
27573 else if (len > 0)
27574 {
27575 if (adding_value == -1)
27576 {
27577 adding_value = 1;
27578 opt = arm_extensions;
27579 }
27580 else if (adding_value != 1)
27581 {
27582 as_bad (_("must specify extensions to add before specifying "
27583 "those to remove"));
27584 return FALSE;
27585 }
27586 }
27587
27588 if (len == 0)
27589 {
27590 as_bad (_("missing architectural extension"));
27591 return FALSE;
27592 }
27593
27594 gas_assert (adding_value != -1);
27595 gas_assert (opt != NULL);
27596
27597 if (ext_table != NULL)
27598 {
27599 const struct arm_ext_table * ext_opt = ext_table;
27600 bfd_boolean found = FALSE;
27601 for (; ext_opt->name != NULL; ext_opt++)
27602 if (ext_opt->name_len == len
27603 && strncmp (ext_opt->name, str, len) == 0)
27604 {
27605 if (adding_value)
27606 {
27607 if (ARM_FEATURE_ZERO (ext_opt->merge))
27608 /* TODO: Option not supported. When we remove the
27609 legacy table this case should error out. */
27610 continue;
27611
27612 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
27613 }
27614 else
27615 {
27616 if (ARM_FEATURE_ZERO (ext_opt->clear))
27617 /* TODO: Option not supported. When we remove the
27618 legacy table this case should error out. */
27619 continue;
27620 ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
27621 }
27622 found = TRUE;
27623 break;
27624 }
27625 if (found)
27626 {
27627 str = ext;
27628 continue;
27629 }
27630 }
27631
27632 /* Scan over the options table trying to find an exact match. */
27633 for (; opt->name != NULL; opt++)
27634 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27635 {
27636 int i, nb_allowed_archs =
27637 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27638 /* Check we can apply the extension to this architecture. */
27639 for (i = 0; i < nb_allowed_archs; i++)
27640 {
27641 /* Empty entry. */
27642 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
27643 continue;
27644 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
27645 break;
27646 }
27647 if (i == nb_allowed_archs)
27648 {
27649 as_bad (_("extension does not apply to the base architecture"));
27650 return FALSE;
27651 }
27652
27653 /* Add or remove the extension. */
27654 if (adding_value)
27655 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
27656 else
27657 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
27658
27659 /* Allowing Thumb division instructions for ARMv7 in autodetection
27660 rely on this break so that duplicate extensions (extensions
27661 with the same name as a previous extension in the list) are not
27662 considered for command-line parsing. */
27663 break;
27664 }
27665
27666 if (opt->name == NULL)
27667 {
27668 /* Did we fail to find an extension because it wasn't specified in
27669 alphabetical order, or because it does not exist? */
27670
27671 for (opt = arm_extensions; opt->name != NULL; opt++)
27672 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27673 break;
27674
27675 if (opt->name == NULL)
27676 as_bad (_("unknown architectural extension `%s'"), str);
27677 else
27678 as_bad (_("architectural extensions must be specified in "
27679 "alphabetical order"));
27680
27681 return FALSE;
27682 }
27683 else
27684 {
27685 /* We should skip the extension we've just matched the next time
27686 round. */
27687 opt++;
27688 }
27689
27690 str = ext;
27691 };
27692
27693 return TRUE;
27694 }
27695
27696 static bfd_boolean
27697 arm_parse_cpu (const char *str)
27698 {
27699 const struct arm_cpu_option_table *opt;
27700 const char *ext = strchr (str, '+');
27701 size_t len;
27702
27703 if (ext != NULL)
27704 len = ext - str;
27705 else
27706 len = strlen (str);
27707
27708 if (len == 0)
27709 {
27710 as_bad (_("missing cpu name `%s'"), str);
27711 return FALSE;
27712 }
27713
27714 for (opt = arm_cpus; opt->name != NULL; opt++)
27715 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27716 {
27717 mcpu_cpu_opt = &opt->value;
27718 if (mcpu_ext_opt == NULL)
27719 mcpu_ext_opt = XNEW (arm_feature_set);
27720 *mcpu_ext_opt = opt->ext;
27721 mcpu_fpu_opt = &opt->default_fpu;
27722 if (opt->canonical_name)
27723 {
27724 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
27725 strcpy (selected_cpu_name, opt->canonical_name);
27726 }
27727 else
27728 {
27729 size_t i;
27730
27731 if (len >= sizeof selected_cpu_name)
27732 len = (sizeof selected_cpu_name) - 1;
27733
27734 for (i = 0; i < len; i++)
27735 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27736 selected_cpu_name[i] = 0;
27737 }
27738
27739 if (ext != NULL)
27740 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
27741
27742 return TRUE;
27743 }
27744
27745 as_bad (_("unknown cpu `%s'"), str);
27746 return FALSE;
27747 }
27748
27749 static bfd_boolean
27750 arm_parse_arch (const char *str)
27751 {
27752 const struct arm_arch_option_table *opt;
27753 const char *ext = strchr (str, '+');
27754 size_t len;
27755
27756 if (ext != NULL)
27757 len = ext - str;
27758 else
27759 len = strlen (str);
27760
27761 if (len == 0)
27762 {
27763 as_bad (_("missing architecture name `%s'"), str);
27764 return FALSE;
27765 }
27766
27767 for (opt = arm_archs; opt->name != NULL; opt++)
27768 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27769 {
27770 march_cpu_opt = &opt->value;
27771 if (march_ext_opt == NULL)
27772 march_ext_opt = XNEW (arm_feature_set);
27773 *march_ext_opt = arm_arch_none;
27774 march_fpu_opt = &opt->default_fpu;
27775 strcpy (selected_cpu_name, opt->name);
27776
27777 if (ext != NULL)
27778 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
27779 opt->ext_table);
27780
27781 return TRUE;
27782 }
27783
27784 as_bad (_("unknown architecture `%s'\n"), str);
27785 return FALSE;
27786 }
27787
27788 static bfd_boolean
27789 arm_parse_fpu (const char * str)
27790 {
27791 const struct arm_option_fpu_value_table * opt;
27792
27793 for (opt = arm_fpus; opt->name != NULL; opt++)
27794 if (streq (opt->name, str))
27795 {
27796 mfpu_opt = &opt->value;
27797 return TRUE;
27798 }
27799
27800 as_bad (_("unknown floating point format `%s'\n"), str);
27801 return FALSE;
27802 }
27803
27804 static bfd_boolean
27805 arm_parse_float_abi (const char * str)
27806 {
27807 const struct arm_option_value_table * opt;
27808
27809 for (opt = arm_float_abis; opt->name != NULL; opt++)
27810 if (streq (opt->name, str))
27811 {
27812 mfloat_abi_opt = opt->value;
27813 return TRUE;
27814 }
27815
27816 as_bad (_("unknown floating point abi `%s'\n"), str);
27817 return FALSE;
27818 }
27819
27820 #ifdef OBJ_ELF
27821 static bfd_boolean
27822 arm_parse_eabi (const char * str)
27823 {
27824 const struct arm_option_value_table *opt;
27825
27826 for (opt = arm_eabis; opt->name != NULL; opt++)
27827 if (streq (opt->name, str))
27828 {
27829 meabi_flags = opt->value;
27830 return TRUE;
27831 }
27832 as_bad (_("unknown EABI `%s'\n"), str);
27833 return FALSE;
27834 }
27835 #endif
27836
27837 static bfd_boolean
27838 arm_parse_it_mode (const char * str)
27839 {
27840 bfd_boolean ret = TRUE;
27841
27842 if (streq ("arm", str))
27843 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
27844 else if (streq ("thumb", str))
27845 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
27846 else if (streq ("always", str))
27847 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
27848 else if (streq ("never", str))
27849 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
27850 else
27851 {
27852 as_bad (_("unknown implicit IT mode `%s', should be "\
27853 "arm, thumb, always, or never."), str);
27854 ret = FALSE;
27855 }
27856
27857 return ret;
27858 }
27859
27860 static bfd_boolean
27861 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
27862 {
27863 codecomposer_syntax = TRUE;
27864 arm_comment_chars[0] = ';';
27865 arm_line_separator_chars[0] = 0;
27866 return TRUE;
27867 }
27868
/* Multi-character command-line options, matched by prefix in
   md_parse_option; the text after the '=' is passed to the handler.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
27889
/* GAS backend hook: handle target-specific command-line option C with
   argument ARG.  Returns 1 if the option was consumed, 0 if it is not
   ours.  Options are matched against, in order: the fixed cases below,
   the short-option table ARM_OPTS, the legacy table ARM_LEGACY_OPTS, and
   the prefix-matched long-option table ARM_LONG_OPTS.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple on/off options: first character must match, and the rest
	 of the option name must equal ARG (or be empty when ARG is).  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: same match rule, but they store a pointer to a
	 feature set rather than an integer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
27986
/* GAS backend hook: print the ARM-specific option summary for --help.
   Walks the short- and long-option tables, then the fixed options that
   are handled directly in md_parse_option.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
28021
28022 #ifdef OBJ_ELF
28023
/* One entry mapping a feature set to an EABI Tag_CPU_arch value.  */
typedef struct
{
  int val;		/* Tag_CPU_arch build attribute value.  */
  arm_feature_set flags;	/* Feature set of the architecture.  */
} cpu_arch_ver_table;
28029
28030 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28031 chronologically for architectures, with an exception for ARMv6-M and
28032 ARMv6S-M due to legacy reasons. No new architecture should have a
28033 special case. This allows for build attribute selection results to be
28034 stable when new architectures are added. */
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  Several feature sets may map
   to the same tag value; terminated by a val of -1.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,	  ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,		  ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,		  ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,		  ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,		  ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,		  ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,		  ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,		  ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,	  ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,		  ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,		  ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,		  ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,		  ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,		  ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,		  ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,	  ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,		  ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,	  ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,	  ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,	  ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,		  ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,		  ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN,	  ARM_ARCH_V8_1M_MAIN},
    {-1,			  ARM_ARCH_NONE}
};
28090
28091 /* Set an attribute if it has not already been set by the user. */
28092
28093 static void
28094 aeabi_set_attribute_int (int tag, int value)
28095 {
28096 if (tag < 1
28097 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28098 || !attributes_set_explicitly[tag])
28099 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
28100 }
28101
28102 static void
28103 aeabi_set_attribute_string (int tag, const char *value)
28104 {
28105 if (tag < 1
28106 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28107 || !attributes_set_explicitly[tag])
28108 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
28109 }
28110
28111 /* Return whether features in the *NEEDED feature set are available via
28112 extensions for the architecture whose feature set is *ARCH_FSET. */
28113
28114 static bfd_boolean
28115 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
28116 const arm_feature_set *needed)
28117 {
28118 int i, nb_allowed_archs;
28119 arm_feature_set ext_fset;
28120 const struct arm_option_extension_value_table *opt;
28121
28122 ext_fset = arm_arch_none;
28123 for (opt = arm_extensions; opt->name != NULL; opt++)
28124 {
28125 /* Extension does not provide any feature we need. */
28126 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
28127 continue;
28128
28129 nb_allowed_archs =
28130 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
28131 for (i = 0; i < nb_allowed_archs; i++)
28132 {
28133 /* Empty entry. */
28134 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
28135 break;
28136
28137 /* Extension is available, add it. */
28138 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
28139 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
28140 }
28141 }
28142
28143 /* Can we enable all features in *needed? */
28144 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
28145 }
28146
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 when no architecture in cpu_arch_ver covers the
   requested features.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Architecture feature set with the extension bits removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare core architecture bits only; FPU bits are irrelevant for
	 Tag_CPU_arch selection.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
28258
/* Set the public EABI object attributes.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  /* flags: full feature set including FPU bits; flags_arch/flags_ext:
     architecture and extension parts used for Tag_CPU_arch selection.  */
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For "armvN"-style names, report the upper-cased version suffix
	 (eg. "7-A") rather than the full name.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
28464
28465 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28466 finished and free extension feature bits which will not be used anymore. */
28467
28468 void
28469 arm_md_post_relax (void)
28470 {
28471 aeabi_set_public_attributes ();
28472 XDELETE (mcpu_ext_opt);
28473 mcpu_ext_opt = NULL;
28474 XDELETE (march_ext_opt);
28475 march_ext_opt = NULL;
28476 }
28477
28478 /* Add the default contents for the .ARM.attributes section. */
28479
28480 void
28481 arm_md_end (void)
28482 {
28483 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
28484 return;
28485
28486 aeabi_set_public_attributes ();
28487 }
28488 #endif /* OBJ_ELF */
28489
28490 /* Parse a .cpu directive. */
28491
28492 static void
28493 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
28494 {
28495 const struct arm_cpu_option_table *opt;
28496 char *name;
28497 char saved_char;
28498
28499 name = input_line_pointer;
28500 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28501 input_line_pointer++;
28502 saved_char = *input_line_pointer;
28503 *input_line_pointer = 0;
28504
28505 /* Skip the first "all" entry. */
28506 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
28507 if (streq (opt->name, name))
28508 {
28509 selected_arch = opt->value;
28510 selected_ext = opt->ext;
28511 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28512 if (opt->canonical_name)
28513 strcpy (selected_cpu_name, opt->canonical_name);
28514 else
28515 {
28516 int i;
28517 for (i = 0; opt->name[i]; i++)
28518 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28519
28520 selected_cpu_name[i] = 0;
28521 }
28522 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28523
28524 *input_line_pointer = saved_char;
28525 demand_empty_rest_of_line ();
28526 return;
28527 }
28528 as_bad (_("unknown cpu `%s'"), name);
28529 *input_line_pointer = saved_char;
28530 ignore_rest_of_line ();
28531 }
28532
28533 /* Parse a .arch directive. */
28534
28535 static void
28536 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
28537 {
28538 const struct arm_arch_option_table *opt;
28539 char saved_char;
28540 char *name;
28541
28542 name = input_line_pointer;
28543 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28544 input_line_pointer++;
28545 saved_char = *input_line_pointer;
28546 *input_line_pointer = 0;
28547
28548 /* Skip the first "all" entry. */
28549 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28550 if (streq (opt->name, name))
28551 {
28552 selected_arch = opt->value;
28553 selected_ext = arm_arch_none;
28554 selected_cpu = selected_arch;
28555 strcpy (selected_cpu_name, opt->name);
28556 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28557 *input_line_pointer = saved_char;
28558 demand_empty_rest_of_line ();
28559 return;
28560 }
28561
28562 as_bad (_("unknown architecture `%s'\n"), name);
28563 *input_line_pointer = saved_char;
28564 ignore_rest_of_line ();
28565 }
28566
28567 /* Parse a .object_arch directive. */
28568
28569 static void
28570 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
28571 {
28572 const struct arm_arch_option_table *opt;
28573 char saved_char;
28574 char *name;
28575
28576 name = input_line_pointer;
28577 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28578 input_line_pointer++;
28579 saved_char = *input_line_pointer;
28580 *input_line_pointer = 0;
28581
28582 /* Skip the first "all" entry. */
28583 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28584 if (streq (opt->name, name))
28585 {
28586 selected_object_arch = opt->value;
28587 *input_line_pointer = saved_char;
28588 demand_empty_rest_of_line ();
28589 return;
28590 }
28591
28592 as_bad (_("unknown architecture `%s'\n"), name);
28593 *input_line_pointer = saved_char;
28594 ignore_rest_of_line ();
28595 }
28596
28597 /* Parse a .arch_extension directive. */
28598
28599 static void
28600 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
28601 {
28602 const struct arm_option_extension_value_table *opt;
28603 char saved_char;
28604 char *name;
28605 int adding_value = 1;
28606
28607 name = input_line_pointer;
28608 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28609 input_line_pointer++;
28610 saved_char = *input_line_pointer;
28611 *input_line_pointer = 0;
28612
28613 if (strlen (name) >= 2
28614 && strncmp (name, "no", 2) == 0)
28615 {
28616 adding_value = 0;
28617 name += 2;
28618 }
28619
28620 for (opt = arm_extensions; opt->name != NULL; opt++)
28621 if (streq (opt->name, name))
28622 {
28623 int i, nb_allowed_archs =
28624 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
28625 for (i = 0; i < nb_allowed_archs; i++)
28626 {
28627 /* Empty entry. */
28628 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
28629 continue;
28630 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
28631 break;
28632 }
28633
28634 if (i == nb_allowed_archs)
28635 {
28636 as_bad (_("architectural extension `%s' is not allowed for the "
28637 "current base architecture"), name);
28638 break;
28639 }
28640
28641 if (adding_value)
28642 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
28643 opt->merge_value);
28644 else
28645 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
28646
28647 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28648 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28649 *input_line_pointer = saved_char;
28650 demand_empty_rest_of_line ();
28651 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28652 on this return so that duplicate extensions (extensions with the
28653 same name as a previous extension in the list) are not considered
28654 for command-line parsing. */
28655 return;
28656 }
28657
28658 if (opt->name == NULL)
28659 as_bad (_("unknown architecture extension `%s'\n"), name);
28660
28661 *input_line_pointer = saved_char;
28662 ignore_rest_of_line ();
28663 }
28664
28665 /* Parse a .fpu directive. */
28666
28667 static void
28668 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
28669 {
28670 const struct arm_option_fpu_value_table *opt;
28671 char saved_char;
28672 char *name;
28673
28674 name = input_line_pointer;
28675 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28676 input_line_pointer++;
28677 saved_char = *input_line_pointer;
28678 *input_line_pointer = 0;
28679
28680 for (opt = arm_fpus; opt->name != NULL; opt++)
28681 if (streq (opt->name, name))
28682 {
28683 selected_fpu = opt->value;
28684 #ifndef CPU_DEFAULT
28685 if (no_cpu_selected ())
28686 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
28687 else
28688 #endif
28689 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28690 *input_line_pointer = saved_char;
28691 demand_empty_rest_of_line ();
28692 return;
28693 }
28694
28695 as_bad (_("unknown floating point format `%s'\n"), name);
28696 *input_line_pointer = saved_char;
28697 ignore_rest_of_line ();
28698 }
28699
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Copy the ARM-specific per-symbol flag word (accessed via ARM_GET_FLAG)
     from SRC to DEST so DEST inherits SRC's ARM attributes.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
28707
28708 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Mapping from attribute tag name (as written in assembly source) to its
     numeric EABI tag value.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search: the table is small and this is not a hot path.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
28783
28784 /* Apply sym value for relocations only in the case that they are for
28785 local symbols in the same segment as the fixup and you have the
28786 respective architectural feature for blx and simple switches. */
28787
28788 int
28789 arm_apply_sym_value (struct fix * fixP, segT this_seg)
28790 {
28791 if (fixP->fx_addsy
28792 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28793 /* PR 17444: If the local symbol is in a different section then a reloc
28794 will always be generated for it, so applying the symbol value now
28795 will result in a double offset being stored in the relocation. */
28796 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
28797 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
28798 {
28799 switch (fixP->fx_r_type)
28800 {
28801 case BFD_RELOC_ARM_PCREL_BLX:
28802 case BFD_RELOC_THUMB_PCREL_BRANCH23:
28803 if (ARM_IS_FUNC (fixP->fx_addsy))
28804 return 1;
28805 break;
28806
28807 case BFD_RELOC_ARM_PCREL_CALL:
28808 case BFD_RELOC_THUMB_PCREL_BLX:
28809 if (THUMB_IS_FUNC (fixP->fx_addsy))
28810 return 1;
28811 break;
28812
28813 default:
28814 break;
28815 }
28816
28817 }
28818 return 0;
28819 }
28820 #endif /* OBJ_ELF */
This page took 1.129925 seconds and 5 git commands to generate.