[Arm, 2/3] Add instruction SB for AArch32
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2018 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol marking the start of the function being described by the
     current run of unwind directives.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table.  */
  symbolS * table_entry;
  /* Personality routine named explicitly, if any.  */
  symbolS * personality_routine;
  /* Index of a pre-defined personality routine, when one is selected
     instead of a named routine.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes used / allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
/* Results from operand parsing worker functions.  */

typedef enum
{
  /* Operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* Operand failed to parse; the caller may try other interpretations.  */
  PARSE_OPERAND_FAIL,
  /* Operand failed in a way that rules out any other interpretation,
     so the caller must not backtrack and retry.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
91
/* Floating point ABI variants selectable via -mfloat-abi (cf.
   mfloat_abi_opt below).  */
enum arm_float_abi
{
  /* FP arguments/results passed in FP registers.  */
  ARM_FLOAT_ABI_HARD,
  /* Soft-float calling convention but hardware FP instructions used.  */
  ARM_FLOAT_ABI_SOFTFP,
  /* Pure software floating point.  */
  ARM_FLOAT_ABI_SOFT
};
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 static const arm_feature_set arm_ext_v6_notm =
208 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
209 static const arm_feature_set arm_ext_v6_dsp =
210 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
211 static const arm_feature_set arm_ext_barrier =
212 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
213 static const arm_feature_set arm_ext_msr =
214 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
215 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
216 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
217 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
218 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
219 #ifdef OBJ_ELF
220 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
221 #endif
222 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
223 static const arm_feature_set arm_ext_m =
224 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
225 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
226 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
227 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
228 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
229 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
230 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
231 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
232 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
233 static const arm_feature_set arm_ext_v8m_main =
234 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
235 /* Instructions in ARMv8-M only found in M profile architectures. */
236 static const arm_feature_set arm_ext_v8m_m_only =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v6t2_v8m =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
240 /* Instructions shared between ARMv8-A and ARMv8-M. */
241 static const arm_feature_set arm_ext_atomics =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
243 #ifdef OBJ_ELF
244 /* DSP instructions Tag_DSP_extension refers to. */
245 static const arm_feature_set arm_ext_dsp =
246 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
247 #endif
248 static const arm_feature_set arm_ext_ras =
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
250 /* FP16 instructions. */
251 static const arm_feature_set arm_ext_fp16 =
252 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
253 static const arm_feature_set arm_ext_fp16_fml =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
255 static const arm_feature_set arm_ext_v8_2 =
256 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
257 static const arm_feature_set arm_ext_v8_3 =
258 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
259 static const arm_feature_set arm_ext_sb =
260 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
261
262 static const arm_feature_set arm_arch_any = ARM_ANY;
263 #ifdef OBJ_ELF
264 static const arm_feature_set fpu_any = FPU_ANY;
265 #endif
266 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
267 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
268 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
269
270 static const arm_feature_set arm_cext_iwmmxt2 =
271 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
272 static const arm_feature_set arm_cext_iwmmxt =
273 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
274 static const arm_feature_set arm_cext_xscale =
275 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
276 static const arm_feature_set arm_cext_maverick =
277 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
278 static const arm_feature_set fpu_fpa_ext_v1 =
279 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
280 static const arm_feature_set fpu_fpa_ext_v2 =
281 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
282 static const arm_feature_set fpu_vfp_ext_v1xd =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
284 static const arm_feature_set fpu_vfp_ext_v1 =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
286 static const arm_feature_set fpu_vfp_ext_v2 =
287 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
288 static const arm_feature_set fpu_vfp_ext_v3xd =
289 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
290 static const arm_feature_set fpu_vfp_ext_v3 =
291 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
292 static const arm_feature_set fpu_vfp_ext_d32 =
293 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
294 static const arm_feature_set fpu_neon_ext_v1 =
295 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
296 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
297 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
298 #ifdef OBJ_ELF
299 static const arm_feature_set fpu_vfp_fp16 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
301 static const arm_feature_set fpu_neon_ext_fma =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
303 #endif
304 static const arm_feature_set fpu_vfp_ext_fma =
305 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
306 static const arm_feature_set fpu_vfp_ext_armv8 =
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
308 static const arm_feature_set fpu_vfp_ext_armv8xd =
309 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
310 static const arm_feature_set fpu_neon_ext_armv8 =
311 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
312 static const arm_feature_set fpu_crypto_ext_armv8 =
313 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
314 static const arm_feature_set crc_ext_armv8 =
315 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
316 static const arm_feature_set fpu_neon_ext_v8_1 =
317 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
318 static const arm_feature_set fpu_neon_ext_dotprod =
319 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
320
321 static int mfloat_abi_opt = -1;
322 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
323 directive. */
324 static arm_feature_set selected_arch = ARM_ARCH_NONE;
325 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
326 directive. */
327 static arm_feature_set selected_ext = ARM_ARCH_NONE;
328 /* Feature bits selected by the last -mcpu/-march or by the combination of the
329 last .cpu/.arch directive .arch_extension directives since that
330 directive. */
331 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
332 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
333 static arm_feature_set selected_fpu = FPU_NONE;
334 /* Feature bits selected by the last .object_arch directive. */
335 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
336 /* Must be long enough to hold any of the names in arm_cpus. */
337 static char selected_cpu_name[20];
338
339 extern FLONUM_TYPE generic_floating_point_number;
340
341 /* Return if no cpu was selected on command-line. */
342 static bfd_boolean
343 no_cpu_selected (void)
344 {
345 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
346 }
347
348 #ifdef OBJ_ELF
349 # ifdef EABI_DEFAULT
350 static int meabi_flags = EABI_DEFAULT;
351 # else
352 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
353 # endif
354
355 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
356
357 bfd_boolean
358 arm_is_eabi (void)
359 {
360 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
361 }
362 #endif
363
364 #ifdef OBJ_ELF
365 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
366 symbolS * GOT_symbol;
367 #endif
368
369 /* 0: assemble for ARM,
370 1: assemble for Thumb,
371 2: assemble for Thumb even though target CPU does not support thumb
372 instructions. */
373 static int thumb_mode = 0;
374 /* A value distinct from the possible values for thumb_mode that we
375 can use to record whether thumb_mode has been copied into the
376 tc_frag_data field of a frag. */
377 #define MODE_RECORDED (1 << 4)
378
379 /* Specifies the intrinsic IT insn behavior mode. */
380 enum implicit_it_mode
381 {
382 IMPLICIT_IT_MODE_NEVER = 0x00,
383 IMPLICIT_IT_MODE_ARM = 0x01,
384 IMPLICIT_IT_MODE_THUMB = 0x02,
385 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
386 };
387 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
388
389 /* If unified_syntax is true, we are processing the new unified
390 ARM/Thumb syntax. Important differences from the old ARM mode:
391
392 - Immediate operands do not require a # prefix.
393 - Conditional affixes always appear at the end of the
394 instruction. (For backward compatibility, those instructions
395 that formerly had them in the middle, continue to accept them
396 there.)
397 - The IT instruction may appear, and if it does is validated
398 against subsequent conditional affixes. It does not generate
399 machine code.
400
401 Important differences from the old Thumb mode:
402
403 - Immediate operands do not require a # prefix.
404 - Most of the V6T2 instructions are only available in unified mode.
405 - The .N and .W suffixes are recognized and honored (it is an error
406 if they cannot be honored).
407 - All instructions set the flags if and only if they have an 's' affix.
408 - Conditional affixes may be used. They are validated against
409 preceding IT instructions. Unlike ARM mode, you cannot use a
410 conditional affix except in the scope of an IT instruction. */
411
412 static bfd_boolean unified_syntax = FALSE;
413
414 /* An immediate operand can start with #, and ld*, st*, pld operands
415 can contain [ and ]. We need to tell APP not to elide whitespace
416 before a [, which can appear as the first operand for pld.
417 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
418 const char arm_symbol_chars[] = "#[]{}";
419
/* Kinds of element a Neon-style type suffix can denote.  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed type element: its kind plus its size.
   NOTE(review): SIZE appears to be in bits — confirm against the
   suffix parser.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type elements one mnemonic suffix may carry.  */
#define NEON_MAX_TYPE_ELS 4

/* A full parsed type suffix: ELEMS valid entries in EL.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};
444
445 enum it_instruction_type
446 {
447 OUTSIDE_IT_INSN,
448 INSIDE_IT_INSN,
449 INSIDE_IT_LAST_INSN,
450 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
451 if inside, should be the last one. */
452 NEUTRAL_IT_INSN, /* This could be either inside or outside,
453 i.e. BKPT and NOP. */
454 IT_INSN /* The IT insn has been parsed. */
455 };
456
457 /* The maximum number of operands we need. */
458 #define ARM_IT_MAX_OPERANDS 6
459
/* Parsed state of the instruction currently being assembled.  Filled in
   by the parser and consumed by the per-instruction encoders.  */
struct arm_it
{
  /* Diagnostic to report for this instruction, or NULL if none.  */
  const char * error;
  /* The encoded instruction bit pattern.  */
  unsigned long instruction;
  /* Size of the encoding in bytes, and any size explicitly requested
     (cf. the .N/.W suffix notes in the unified-syntax comment above).  */
  int size;
  int size_req;
  /* Condition code for this instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Type suffix parsed from the mnemonic, if any.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation required for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* Classification of this instruction relative to any IT block.  */
  enum it_instruction_type it_insn_type;

  /* One entry per parsed operand.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;  /* Operand present.  */
    unsigned isreg : 1;  /* Operand was a register.  */
    unsigned immisreg : 1;  /* .imm field is a second register.  */
    unsigned isscalar : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;  /* Operand has relocation suffix.  */
    unsigned writeback : 1;  /* Operand has trailing !  */
    unsigned preind : 1;  /* Preindexed address.  */
    unsigned postind : 1;  /* Postindexed address.  */
    unsigned negative : 1;  /* Index register was negated.  */
    unsigned shifted : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
513
514 static struct arm_it inst;
515
516 #define NUM_FLOAT_VALS 8
517
518 const char * fp_const[] =
519 {
520 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
521 };
522
523 /* Number of littlenums required to hold an extended precision number. */
524 #define MAX_LITTLENUMS 6
525
526 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
527
528 #define FAIL (-1)
529 #define SUCCESS (0)
530
531 #define SUFF_S 1
532 #define SUFF_D 2
533 #define SUFF_E 3
534 #define SUFF_P 4
535
536 #define CP_T_X 0x00008000
537 #define CP_T_Y 0x00400000
538
539 #define CONDS_BIT 0x00100000
540 #define LOAD_BIT 0x00100000
541
542 #define DOUBLE_LOAD_FLAG 0x00000001
543
544 struct asm_cond
545 {
546 const char * template_name;
547 unsigned long value;
548 };
549
550 #define COND_ALWAYS 0xE
551
552 struct asm_psr
553 {
554 const char * template_name;
555 unsigned long field;
556 };
557
558 struct asm_barrier_opt
559 {
560 const char * template_name;
561 unsigned long value;
562 const arm_feature_set arch;
563 };
564
565 /* The bit that distinguishes CPSR and SPSR. */
566 #define SPSR_BIT (1 << 22)
567
568 /* The individual PSR flag bits. */
569 #define PSR_c (1 << 16)
570 #define PSR_x (1 << 17)
571 #define PSR_s (1 << 18)
572 #define PSR_f (1 << 19)
573
574 struct reloc_entry
575 {
576 const char * name;
577 bfd_reloc_code_real_type reloc;
578 };
579
580 enum vfp_reg_pos
581 {
582 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
583 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
584 };
585
586 enum vfp_ldstm_type
587 {
588 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
589 };
590
591 /* Bits for DEFINED field in neon_typed_alias. */
592 #define NTA_HASTYPE 1
593 #define NTA_HASINDEX 2
594
595 struct neon_typed_alias
596 {
597 unsigned char defined;
598 unsigned char index;
599 struct neon_type_el eltype;
600 };
601
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* ARM core register.  */
  REG_TYPE_CP,		/* Co-processor number.  */
  REG_TYPE_CN,		/* Co-processor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSD,		/* Neon single or double precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB		/* No diagnostic of its own (empty message).  */
};
631
632 /* Structure for a hash table entry for a register.
633 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
634 information which states whether a vector type or index is specified (for a
635 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
636 struct reg_entry
637 {
638 const char * name;
639 unsigned int number;
640 unsigned char type;
641 unsigned char builtin;
642 struct neon_typed_alias * neon;
643 };
644
645 /* Diagnostics used when we don't get a register of the expected type. */
646 const char * const reg_expected_msgs[] =
647 {
648 [REG_TYPE_RN] = N_("ARM register expected"),
649 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
650 [REG_TYPE_CN] = N_("co-processor register expected"),
651 [REG_TYPE_FN] = N_("FPA register expected"),
652 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
653 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
654 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
655 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
656 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
657 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
658 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
659 " expected"),
660 [REG_TYPE_VFC] = N_("VFP system register expected"),
661 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
662 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
663 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
664 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
665 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
666 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
667 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
668 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
669 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
670 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
671 [REG_TYPE_RNB] = N_("")
672 };
673
674 /* Some well known registers that we refer to directly elsewhere. */
675 #define REG_R12 12
676 #define REG_SP 13
677 #define REG_LR 14
678 #define REG_PC 15
679
680 /* ARM instructions take 4bytes in the object file, Thumb instructions
681 take 2: */
682 #define INSN_SIZE 4
683
684 struct asm_opcode
685 {
686 /* Basic string to match. */
687 const char * template_name;
688
689 /* Parameters to instruction. */
690 unsigned int operands[8];
691
692 /* Conditional tag - see opcode_lookup. */
693 unsigned int tag : 4;
694
695 /* Basic instruction code. */
696 unsigned int avalue : 28;
697
698 /* Thumb-format instruction code. */
699 unsigned int tvalue;
700
701 /* Which architecture variant provides this instruction. */
702 const arm_feature_set * avariant;
703 const arm_feature_set * tvariant;
704
705 /* Function to call to encode instruction in ARM format. */
706 void (* aencode) (void);
707
708 /* Function to call to encode instruction in Thumb format. */
709 void (* tencode) (void);
710 };
711
712 /* Defines for various bits that we will want to toggle. */
713 #define INST_IMMEDIATE 0x02000000
714 #define OFFSET_REG 0x02000000
715 #define HWOFFSET_IMM 0x00400000
716 #define SHIFT_BY_REG 0x00000010
717 #define PRE_INDEX 0x01000000
718 #define INDEX_UP 0x00800000
719 #define WRITE_BACK 0x00200000
720 #define LDM_TYPE_2_OR_3 0x00400000
721 #define CPSI_MMOD 0x00020000
722
723 #define LITERAL_MASK 0xf000f000
724 #define OPCODE_MASK 0xfe1fffff
725 #define V4_STR_BIT 0x00000020
726 #define VLDR_VMOV_SAME 0x0040f000
727
728 #define T2_SUBS_PC_LR 0xf3de8f00
729
730 #define DATA_OP_SHIFT 21
731 #define SBIT_SHIFT 20
732
733 #define T2_OPCODE_MASK 0xfe1fffff
734 #define T2_DATA_OP_SHIFT 21
735 #define T2_SBIT_SHIFT 20
736
737 #define A_COND_MASK 0xf0000000
738 #define A_PUSH_POP_OP_MASK 0x0fff0000
739
740 /* Opcodes for pushing/poping registers to/from the stack. */
741 #define A1_OPCODE_PUSH 0x092d0000
742 #define A2_OPCODE_PUSH 0x052d0004
743 #define A2_OPCODE_POP 0x049d0004
744
745 /* Codes to distinguish the arithmetic instructions. */
746 #define OPCODE_AND 0
747 #define OPCODE_EOR 1
748 #define OPCODE_SUB 2
749 #define OPCODE_RSB 3
750 #define OPCODE_ADD 4
751 #define OPCODE_ADC 5
752 #define OPCODE_SBC 6
753 #define OPCODE_RSC 7
754 #define OPCODE_TST 8
755 #define OPCODE_TEQ 9
756 #define OPCODE_CMP 10
757 #define OPCODE_CMN 11
758 #define OPCODE_ORR 12
759 #define OPCODE_MOV 13
760 #define OPCODE_BIC 14
761 #define OPCODE_MVN 15
762
763 #define T2_OPCODE_AND 0
764 #define T2_OPCODE_BIC 1
765 #define T2_OPCODE_ORR 2
766 #define T2_OPCODE_ORN 3
767 #define T2_OPCODE_EOR 4
768 #define T2_OPCODE_ADD 8
769 #define T2_OPCODE_ADC 10
770 #define T2_OPCODE_SBC 11
771 #define T2_OPCODE_SUB 13
772 #define T2_OPCODE_RSB 14
773
774 #define T_OPCODE_MUL 0x4340
775 #define T_OPCODE_TST 0x4200
776 #define T_OPCODE_CMN 0x42c0
777 #define T_OPCODE_NEG 0x4240
778 #define T_OPCODE_MVN 0x43c0
779
780 #define T_OPCODE_ADD_R3 0x1800
781 #define T_OPCODE_SUB_R3 0x1a00
782 #define T_OPCODE_ADD_HI 0x4400
783 #define T_OPCODE_ADD_ST 0xb000
784 #define T_OPCODE_SUB_ST 0xb080
785 #define T_OPCODE_ADD_SP 0xa800
786 #define T_OPCODE_ADD_PC 0xa000
787 #define T_OPCODE_ADD_I8 0x3000
788 #define T_OPCODE_SUB_I8 0x3800
789 #define T_OPCODE_ADD_I3 0x1c00
790 #define T_OPCODE_SUB_I3 0x1e00
791
792 #define T_OPCODE_ASR_R 0x4100
793 #define T_OPCODE_LSL_R 0x4080
794 #define T_OPCODE_LSR_R 0x40c0
795 #define T_OPCODE_ROR_R 0x41c0
796 #define T_OPCODE_ASR_I 0x1000
797 #define T_OPCODE_LSL_I 0x0000
798 #define T_OPCODE_LSR_I 0x0800
799
800 #define T_OPCODE_MOV_I8 0x2000
801 #define T_OPCODE_CMP_I8 0x2800
802 #define T_OPCODE_CMP_LR 0x4280
803 #define T_OPCODE_MOV_HR 0x4600
804 #define T_OPCODE_CMP_HR 0x4500
805
806 #define T_OPCODE_LDR_PC 0x4800
807 #define T_OPCODE_LDR_SP 0x9800
808 #define T_OPCODE_STR_SP 0x9000
809 #define T_OPCODE_LDR_IW 0x6800
810 #define T_OPCODE_STR_IW 0x6000
811 #define T_OPCODE_LDR_IH 0x8800
812 #define T_OPCODE_STR_IH 0x8000
813 #define T_OPCODE_LDR_IB 0x7800
814 #define T_OPCODE_STR_IB 0x7000
815 #define T_OPCODE_LDR_RW 0x5800
816 #define T_OPCODE_STR_RW 0x5000
817 #define T_OPCODE_LDR_RH 0x5a00
818 #define T_OPCODE_STR_RH 0x5200
819 #define T_OPCODE_LDR_RB 0x5c00
820 #define T_OPCODE_STR_RB 0x5400
821
822 #define T_OPCODE_PUSH 0xb400
823 #define T_OPCODE_POP 0xbc00
824
825 #define T_OPCODE_BRANCH 0xe000
826
827 #define THUMB_SIZE 2 /* Size of thumb instruction. */
828 #define THUMB_PP_PC_LR 0x0100
829 #define THUMB_LOAD_BIT 0x0800
830 #define THUMB2_LOAD_BIT 0x00100000
831
/* Diagnostic strings assigned to inst.error when an instruction or
   operand is rejected.  None of these object-like macros may end in a
   semicolon: a stray terminator would break uses in expression context
   (e.g. the second operand of ?:) — BAD_ADDR_MODE previously carried
   one and has been fixed to match its siblings.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
855
856 static struct hash_control * arm_ops_hsh;
857 static struct hash_control * arm_cond_hsh;
858 static struct hash_control * arm_shift_hsh;
859 static struct hash_control * arm_psr_hsh;
860 static struct hash_control * arm_v7m_psr_hsh;
861 static struct hash_control * arm_reg_hsh;
862 static struct hash_control * arm_reloc_hsh;
863 static struct hash_control * arm_barrier_opt_hsh;
864
865 /* Stuff needed to resolve the label ambiguity
866 As:
867 ...
868 label: <insn>
869 may differ from:
870 ...
871 label:
872 <insn> */
873
874 symbolS * last_label_seen;
875 static int label_is_thumb_function_name = FALSE;
876
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants queued for emission in this pool.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Identifier for this pool (assigned when the pool is created).  */
  unsigned int id;
  /* Symbol placed at the pool so loads can reference it.  */
  symbolS * symbol;
  /* Section and sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Per-entry source locations, for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Link in the list_of_pools chain.  */
  struct literal_pool * next;
  /* Alignment required when the pool is emitted.  */
  unsigned int alignment;
} literal_pool;
895
896 /* Pointer to a linked list of literal pools. */
897 literal_pool * list_of_pools = NULL;
898
/* States of the .asmfunc/.endasmfunc directive protocol.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,		/* Not inside an asmfunc region.  */
  WAITING_ASMFUNC_NAME,		/* Saw .asmfunc, awaiting the name.  */
  WAITING_ENDASMFUNC		/* Got the name, awaiting .endasmfunc.  */
} asmfunc_states;

/* Current position in the .asmfunc/.endasmfunc protocol.  */
static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

#ifdef OBJ_ELF
/* For ELF, IT-block state is kept per-section so that section switches
   do not corrupt it.  */
# define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
913
914 static inline int
915 now_it_compatible (int cond)
916 {
917 return (cond & ~1) == (now_it.cc & ~1);
918 }
919
920 static inline int
921 conditional_insn (void)
922 {
923 return inst.cond != COND_ALWAYS;
924 }
925
/* IT (If-Then) block state machine; definitions appear later in this
   file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
933
/* Record TYPE as the IT classification of the current instruction and
   advance the IT state machine; bail out of the calling function (which
   must return void) if the state machine reports an error.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for use in value-returning functions:
   FAILRET is returned when the state machine reports an error.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as the last one of an IT block, picking
   the conditional/unconditional variant from inst.cond.  */
#define set_it_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_it_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)
961
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Statement separator for multiple statements on one line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* NOTE(review): this deliberately skips at most ONE space -- presumably
   the input scrubber has already collapsed whitespace runs; confirm
   before changing it into a loop.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
996
997 static inline int
998 skip_past_char (char ** str, char c)
999 {
1000 /* PR gas/14987: Allow for whitespace before the expected character. */
1001 skip_whitespace (*str);
1002
1003 if (**str == c)
1004 {
1005 (*str)++;
1006 return SUCCESS;
1007 }
1008 else
1009 return FAIL;
1010 }
1011
1012 #define skip_past_comma(str) skip_past_char (str, ',')
1013
1014 /* Arithmetic expressions (possibly involving symbols). */
1015
1016 /* Return TRUE if anything in the expression is a bignum. */
1017
1018 static bfd_boolean
1019 walk_no_bignums (symbolS * sp)
1020 {
1021 if (symbol_get_value_expression (sp)->X_op == O_big)
1022 return TRUE;
1023
1024 if (symbol_get_value_expression (sp)->X_add_symbol)
1025 {
1026 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1027 || (symbol_get_value_expression (sp)->X_op_symbol
1028 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1029 }
1030
1031 return FALSE;
1032 }
1033
/* Set while my_get_expression is active; md_operand uses it to flag
   otherwise-unparseable operands (see md_operand below).  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix is consumed.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is mandatory.  */
#define GE_OPT_PREFIX 2		/* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1043
/* Parse an expression at *STR into EP, handling an optional or required
   '#'/'$' immediate prefix according to PREFIX_MODE (one of the GE_*
   values above).  Temporarily redirects input_line_pointer so the
   generic expression code reads the instruction text.  On success *STR
   is advanced and SUCCESS returned; on failure a non-zero value is
   returned with inst.error set.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Lets md_operand know to mark bad operands as O_illegal.  */
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      /* NOTE(review): this path returns 1 while the prefix check above
	 returns FAIL (-1); callers only test for non-zero so both work,
	 but confirm before unifying the conventions.  */
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1113
1114 /* Turn a string in input_line_pointer into a floating point constant
1115 of type TYPE, and store the appropriate bytes in *LITP. The number
1116 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1117 returned, or NULL on OK.
1118
   Note that fp constants aren't represented in the normal way on the ARM.
1120 In big endian mode, things are as expected. However, in little endian
1121 mode fp constants are big-endian word-wise, and little-endian byte-wise
1122 within the words. For example, (double) 1.1 in big endian mode is
1123 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1124 the byte sequence 99 99 f1 3f 9a 99 99 99.
1125
1126 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1127
/* Convert the FP literal at input_line_pointer into littlenums stored
   at *litP, in the mixed-endian layout described in the comment above.
   TYPE selects the precision: 'f'/'s' single (2 littlenums), 'd'/'r'
   double (4), 'x'/'p' extended/packed (5).  *sizeP receives the byte
   count.  Returns NULL on success, else an error message.  */
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big-endian target: littlenums are already in memory order.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      /* Pure-endian FPUs (e.g. VFP) store fully little-endian: emit
	 the littlenums in reverse order.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1203
1204 /* We handle all bad expressions here, so that we can report the faulty
1205 instruction in the error message. */
1206
1207 void
1208 md_operand (expressionS * exp)
1209 {
1210 if (in_my_get_expression)
1211 exp->X_op = O_illegal;
1212 }
1213
1214 /* Immediate values. */
1215
1216 #ifdef OBJ_ELF
1217 /* Generic immediate-value read function for use in directives.
1218 Accepts anything that 'expression' can fold to a constant.
1219 *val receives the number. */
1220
1221 static int
1222 immediate_for_directive (int *val)
1223 {
1224 expressionS exp;
1225 exp.X_op = O_illegal;
1226
1227 if (is_immediate_prefix (*input_line_pointer))
1228 {
1229 input_line_pointer++;
1230 expression (&exp);
1231 }
1232
1233 if (exp.X_op != O_constant)
1234 {
1235 as_bad (_("expected #constant"));
1236 ignore_rest_of_line ();
1237 return FAIL;
1238 }
1239 *val = exp.X_add_number;
1240 return SUCCESS;
1241 }
1242 #endif
1243
1244 /* Register parsing. */
1245
1246 /* Generic register parser. CCP points to what should be the
1247 beginning of a register name. If it is indeed a valid register
1248 name, advance CCP over it and return the reg_entry structure;
1249 otherwise return NULL. Does not issue diagnostics. */
1250
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* Targets defining REGISTER_PREFIX require it to be present.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  /* An optional prefix is simply skipped when present.  */
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  /* Register names must start like an identifier.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan over the rest of the candidate name.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Valid register: advance past it.  */
  *ccp = p;
  return reg;
}
1286
/* Accept alternative spellings for some register classes: generic
   coprocessor register names for Maverick registers, a bare number for
   a coprocessor, and WCG names where WC registers are wanted.  START is
   the unparsed text; REG is the entry found by arm_reg_parse_multi (may
   be NULL).  Returns the register number or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1325
1326 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1327 return value is the register number or FAIL. */
1328
1329 static int
1330 arm_reg_parse (char **ccp, enum arm_reg_type type)
1331 {
1332 char *start = *ccp;
1333 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1334 int ret;
1335
1336 /* Do not allow a scalar (reg+index) to parse as a register. */
1337 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1338 return FAIL;
1339
1340 if (reg && reg->type == type)
1341 return reg->number;
1342
1343 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1344 return ret;
1345
1346 *ccp = start;
1347 return FAIL;
1348 }
1349
1350 /* Parse a Neon type specifier. *STR should point at the leading '.'
1351 character. Does no verification at this stage that the type fits the opcode
1352 properly. E.g.,
1353
1354 .i32.i32.s16
1355 .s32.f32
1356 .u16
1357
1358 Can all be legally parsed by this function.
1359
1360 Fills in neon_type struct pointer with parsed information, and updates STR
1361 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1362 type, FAIL if not. */
1363
1364 static int
1365 parse_neon_type (struct neon_type *type, char **str)
1366 {
1367 char *ptr = *str;
1368
1369 if (type)
1370 type->elems = 0;
1371
1372 while (type->elems < NEON_MAX_TYPE_ELS)
1373 {
1374 enum neon_el_type thistype = NT_untyped;
1375 unsigned thissize = -1u;
1376
1377 if (*ptr != '.')
1378 break;
1379
1380 ptr++;
1381
1382 /* Just a size without an explicit type. */
1383 if (ISDIGIT (*ptr))
1384 goto parsesize;
1385
1386 switch (TOLOWER (*ptr))
1387 {
1388 case 'i': thistype = NT_integer; break;
1389 case 'f': thistype = NT_float; break;
1390 case 'p': thistype = NT_poly; break;
1391 case 's': thistype = NT_signed; break;
1392 case 'u': thistype = NT_unsigned; break;
1393 case 'd':
1394 thistype = NT_float;
1395 thissize = 64;
1396 ptr++;
1397 goto done;
1398 default:
1399 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1400 return FAIL;
1401 }
1402
1403 ptr++;
1404
1405 /* .f is an abbreviation for .f32. */
1406 if (thistype == NT_float && !ISDIGIT (*ptr))
1407 thissize = 32;
1408 else
1409 {
1410 parsesize:
1411 thissize = strtoul (ptr, &ptr, 10);
1412
1413 if (thissize != 8 && thissize != 16 && thissize != 32
1414 && thissize != 64)
1415 {
1416 as_bad (_("bad size %d in type specifier"), thissize);
1417 return FAIL;
1418 }
1419 }
1420
1421 done:
1422 if (type)
1423 {
1424 type->el[type->elems].type = thistype;
1425 type->el[type->elems].size = thissize;
1426 type->elems++;
1427 }
1428 }
1429
1430 /* Empty/missing type is not a successful parse. */
1431 if (type->elems == 0)
1432 return FAIL;
1433
1434 *str = ptr;
1435
1436 return SUCCESS;
1437 }
1438
1439 /* Errors may be set multiple times during parsing or bit encoding
1440 (particularly in the Neon bits), but usually the earliest error which is set
1441 will be the most meaningful. Avoid overwriting it with later (cascading)
1442 errors by calling this function. */
1443
1444 static void
1445 first_error (const char *err)
1446 {
1447 if (!inst.error)
1448 inst.error = err;
1449 }
1450
1451 /* Parse a single type, e.g. ".s32", leading period included. */
1452 static int
1453 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1454 {
1455 char *str = *ccp;
1456 struct neon_type optype;
1457
1458 if (*str == '.')
1459 {
1460 if (parse_neon_type (&optype, &str) == SUCCESS)
1461 {
1462 if (optype.elems == 1)
1463 *vectype = optype.el[0];
1464 else
1465 {
1466 first_error (_("only one type should be specified for operand"));
1467 return FAIL;
1468 }
1469 }
1470 else
1471 {
1472 first_error (_("vector type expected"));
1473 return FAIL;
1474 }
1475 }
1476 else
1477 return FAIL;
1478
1479 *ccp = str;
1480
1481 return SUCCESS;
1482 }
1483
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15		/* "[]": operate on all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* No index: interleaved structures.  */
1489
1490 /* Parse either a register or a scalar, with an optional type. Return the
1491 register number, and optionally fill in the actual type of the register
1492 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1493 type/index information in *TYPEINFO. */
1494
/* Worker for register/scalar parsing: parse at *CCP a register name of
   (or compatible with) TYPE, then an optional ".type" suffix and an
   optional "[index]" scalar suffix.  The resolved register class goes
   to *RTYPE and the type/index information to *TYPEINFO (both may be
   NULL).  Returns the register number or FAIL.  */
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" until proven otherwise.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Start from any type information attached to the register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      /* Scalar syntax: only D registers -- or S registers with the
	 ARMv8.2 extension -- may be indexed.  */
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes; otherwise a constant lane number.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1602
/* Like arm_reg_parse, but allow the following extra features:
1604 - If RTYPE is non-zero, return the (possibly restricted) type of the
1605 register (e.g. Neon double or quad reg when either has been requested).
1606 - If this is a Neon vector type with additional type information, fill
1607 in the struct pointed to by VECTYPE (if non-NULL).
1608 This function will fault on encountering a scalar. */
1609
1610 static int
1611 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1612 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1613 {
1614 struct neon_typed_alias atype;
1615 char *str = *ccp;
1616 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1617
1618 if (reg == FAIL)
1619 return FAIL;
1620
1621 /* Do not allow regname(... to parse as a register. */
1622 if (*str == '(')
1623 return FAIL;
1624
1625 /* Do not allow a scalar (reg+index) to parse as a register. */
1626 if ((atype.defined & NTA_HASINDEX) != 0)
1627 {
1628 first_error (_("register operand expected, but got scalar"));
1629 return FAIL;
1630 }
1631
1632 if (vectype)
1633 *vectype = atype.eltype;
1634
1635 *ccp = str;
1636
1637 return reg;
1638 }
1639
/* Decode the value returned by parse_scalar (reg * 16 + index): the
   register number in the high bits, the lane index in the low four.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1642
1643 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1644 have enough information to be able to do a good job bounds-checking. So, we
1645 just do easy checks here, and do further checks later. */
1646
1647 static int
1648 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1649 {
1650 int reg;
1651 char *str = *ccp;
1652 struct neon_typed_alias atype;
1653 enum arm_reg_type reg_type = REG_TYPE_VFD;
1654
1655 if (elsize == 4)
1656 reg_type = REG_TYPE_VFS;
1657
1658 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1659
1660 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1661 return FAIL;
1662
1663 if (atype.index == NEON_ALL_LANES)
1664 {
1665 first_error (_("scalar must have an index"));
1666 return FAIL;
1667 }
1668 else if (atype.index >= 64 / elsize)
1669 {
1670 first_error (_("scalar index out of range"));
1671 return FAIL;
1672 }
1673
1674 if (type)
1675 *type = atype.eltype;
1676
1677 *ccp = str;
1678
1679 return reg * 16 + atype.index;
1680 }
1681
1682 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1683
1684 static long
1685 parse_reg_list (char ** strp)
1686 {
1687 char * str = * strp;
1688 long range = 0;
1689 int another_range;
1690
1691 /* We come back here if we get ranges concatenated by '+' or '|'. */
1692 do
1693 {
1694 skip_whitespace (str);
1695
1696 another_range = 0;
1697
1698 if (*str == '{')
1699 {
1700 int in_range = 0;
1701 int cur_reg = -1;
1702
1703 str++;
1704 do
1705 {
1706 int reg;
1707
1708 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1709 {
1710 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1711 return FAIL;
1712 }
1713
1714 if (in_range)
1715 {
1716 int i;
1717
1718 if (reg <= cur_reg)
1719 {
1720 first_error (_("bad range in register list"));
1721 return FAIL;
1722 }
1723
1724 for (i = cur_reg + 1; i < reg; i++)
1725 {
1726 if (range & (1 << i))
1727 as_tsktsk
1728 (_("Warning: duplicated register (r%d) in register list"),
1729 i);
1730 else
1731 range |= 1 << i;
1732 }
1733 in_range = 0;
1734 }
1735
1736 if (range & (1 << reg))
1737 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1738 reg);
1739 else if (reg <= cur_reg)
1740 as_tsktsk (_("Warning: register range not in ascending order"));
1741
1742 range |= 1 << reg;
1743 cur_reg = reg;
1744 }
1745 while (skip_past_comma (&str) != FAIL
1746 || (in_range = 1, *str++ == '-'));
1747 str--;
1748
1749 if (skip_past_char (&str, '}') == FAIL)
1750 {
1751 first_error (_("missing `}'"));
1752 return FAIL;
1753 }
1754 }
1755 else
1756 {
1757 expressionS exp;
1758
1759 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1760 return FAIL;
1761
1762 if (exp.X_op == O_constant)
1763 {
1764 if (exp.X_add_number
1765 != (exp.X_add_number & 0x0000ffff))
1766 {
1767 inst.error = _("invalid register mask");
1768 return FAIL;
1769 }
1770
1771 if ((range & exp.X_add_number) != 0)
1772 {
1773 int regno = range & exp.X_add_number;
1774
1775 regno &= -regno;
1776 regno = (1 << regno) - 1;
1777 as_tsktsk
1778 (_("Warning: duplicated register (r%d) in register list"),
1779 regno);
1780 }
1781
1782 range |= exp.X_add_number;
1783 }
1784 else
1785 {
1786 if (inst.reloc.type != 0)
1787 {
1788 inst.error = _("expression too complex");
1789 return FAIL;
1790 }
1791
1792 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1793 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1794 inst.reloc.pc_rel = 0;
1795 }
1796 }
1797
1798 if (*str == '|' || *str == '+')
1799 {
1800 str++;
1801 another_range = 1;
1802 }
1803 }
1804 while (another_range);
1805
1806 *strp = str;
1807 return range;
1808 }
1809
1810 /* Types of registers in a list. */
1811
/* Kinds of register accepted in a VFP/Neon register list (see
   parse_vfp_reg_list below).  */
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* Neon D registers; Q syntax also accepted.  */
};
1818
1819 /* Parse a VFP register list. If the string is invalid return FAIL.
1820 Otherwise return the number of registers, and set PBASE to the first
1821 register. Parses registers of type ETYPE.
1822 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1823 - Q registers can be used to specify pairs of D registers
1824 - { } can be omitted from around a singleton register list
1825 FIXME: This is not implemented, as it would require backtracking in
1826 some cases, e.g.:
1827 vtbl.8 d3,d4,d5
1828 This could be done (the meaning isn't really ambiguous), but doesn't
1829 fit in well with the current parsing framework.
1830 - 32 D registers may be used (also true for VFPv3).
1831 FIXME: Types are ignored in these register lists, which is probably a
1832 bug. */
1833
/* See the comment above for the contract: returns the register count or
   FAIL, storing the first register in *PBASE.  */
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; D-register limits are resolved below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record use of the D32 extension in the arch-used bitmap.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above any valid register; lowered as registers are seen.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> covers D<2n> and D<2n+1>, so bump the top by one.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this skips the assumed closing '}' without checking
     it really is one; confirm malformed input cannot reach here with a
     different character.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1997
1998 /* True if two alias types are the same. */
1999
2000 static bfd_boolean
2001 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2002 {
2003 if (!a && !b)
2004 return TRUE;
2005
2006 if (!a || !b)
2007 return FALSE;
2008
2009 if (a->defined != b->defined)
2010 return FALSE;
2011
2012 if ((a->defined & NTA_HASTYPE) != 0
2013 && (a->eltype.type != b->eltype.type
2014 || a->eltype.size != b->eltype.size))
2015 return FALSE;
2016
2017 if ((a->defined & NTA_HASINDEX) != 0
2018 && (a->index != b->index))
2019 return FALSE;
2020
2021 return TRUE;
2022 }
2023
2024 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2025 The base register is put in *PBASE.
2026 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2027 the return value.
2028 The register stride (minus one) is put in bit 4 of the return value.
2029 Bits [6:5] encode the list length (minus one).
2030 The type of the list elements is put in *ELTYPE, if non-NULL. */
2031
/* Decode the value returned by parse_neon_el_struct_list (see the
   comment above for the exact layout).  */
#define NEON_LANE(X)		((X) & 0xf)		/* Lane, or NEON_*_LANES.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)	/* Stride: 1 or 2.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)	/* Length: 1 to 4.  */
2035
/* See the comment above for the contract and the encoding of the
   return value.  */
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces may be omitted around a single register.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember it and its type.  Q registers
	     imply unit stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* The second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every entry must carry identical type/index information.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Ranges count in D registers; a Q register contributes two.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the comment above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2192
2193 /* Parse an explicit relocation suffix on an expression. This is
2194 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2195 arm_reloc_hsh contains no entries, so this function can only
2196 succeed if there is no () after the word. Returns -1 on error,
2197 BFD_RELOC_UNUSED if there wasn't any suffix. */
2198
2199 static int
2200 parse_reloc (char **str)
2201 {
2202 struct reloc_entry *r;
2203 char *p, *q;
2204
2205 if (**str != '(')
2206 return BFD_RELOC_UNUSED;
2207
2208 p = *str + 1;
2209 q = p;
2210
2211 while (*q && *q != ')' && *q != ',')
2212 q++;
2213 if (*q != ')')
2214 return -1;
2215
2216 if ((r = (struct reloc_entry *)
2217 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2218 return -1;
2219
2220 *str = q + 1;
2221 return r->reloc;
2222 }
2223
2224 /* Directives: register aliases. */
2225
2226 static struct reg_entry *
2227 insert_reg_alias (char *str, unsigned number, int type)
2228 {
2229 struct reg_entry *new_reg;
2230 const char *name;
2231
2232 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2233 {
2234 if (new_reg->builtin)
2235 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2236
2237 /* Only warn about a redefinition if it's not defined as the
2238 same register. */
2239 else if (new_reg->number != number || new_reg->type != type)
2240 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2241
2242 return NULL;
2243 }
2244
2245 name = xstrdup (str);
2246 new_reg = XNEW (struct reg_entry);
2247
2248 new_reg->name = name;
2249 new_reg->number = number;
2250 new_reg->type = type;
2251 new_reg->builtin = FALSE;
2252 new_reg->neon = NULL;
2253
2254 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2255 abort ();
2256
2257 return new_reg;
2258 }
2259
2260 static void
2261 insert_neon_reg_alias (char *str, int number, int type,
2262 struct neon_typed_alias *atype)
2263 {
2264 struct reg_entry *reg = insert_reg_alias (str, number, type);
2265
2266 if (!reg)
2267 {
2268 first_error (_("attempt to redefine typed alias"));
2269 return;
2270 }
2271
2272 if (atype)
2273 {
2274 reg->neon = XNEW (struct neon_typed_alias);
2275 *reg->neon = *atype;
2276 }
2277 }
2278
2279 /* Look for the .req directive. This is of the form:
2280
2281 new_register_name .req existing_register_name
2282
2283 If we find one, or if it looks sufficiently like one that we want to
2284 handle any error here, return TRUE. Otherwise return FALSE. */
2285
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name a known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case variant when it actually differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copied the name, so the scratch buffer can go.  */
  free (nbuf);
  return TRUE;
}
2358
2359 /* Create a Neon typed/indexed register alias using directives, e.g.:
2360 X .dn d5.s32[1]
2361 Y .qn 6.s16
2362 Z .dn d7
2363 T .dn Z[0]
2364 These typed registers can be used instead of the types specified after the
2365 Neon mnemonic, so long as all operands given have types. Types can also be
2366 specified directly, e.g.:
2367 vadd d0.s32, d1.s32, d2.s32 */
2368
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Used when the base is a bare number.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with an empty type/index; pieces are filled in below.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* The directive itself selects the register class of the base.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  /* A named base register must belong to the class the directive
     implies (D for .dn, Q for .qn).  */
  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* A bare number counts in D-register units, so a Q base doubles.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* If the base is itself a typed alias, inherit its type/index.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* Register the alias as written, then case variants where they
     differ, mirroring what create_register_alias does for .req.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2507
2508 /* Should never be called, as .req goes between the alias and the
2509 register name, not at the beginning of the line. */
2510
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* ".req" is only valid infix (ALIAS .req REG), so reaching this
     handler means it appeared at the start of a line.  */
  as_bad (_("invalid syntax for .req directive"));
}
2516
/* Like s_req: ".dn" must appear between the alias name and the
   register, never at the start of a line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2522
/* Like s_req: ".qn" must appear between the alias name and the
   register, never at the start of a line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2528
2529 /* The .unreq directive deletes an alias which was previously defined
2530 by .req. For example:
2531
2532 my_alias .req r11
2533 .unreq my_alias */
2534
2535 static void
2536 s_unreq (int a ATTRIBUTE_UNUSED)
2537 {
2538 char * name;
2539 char saved_char;
2540
2541 name = input_line_pointer;
2542
2543 while (*input_line_pointer != 0
2544 && *input_line_pointer != ' '
2545 && *input_line_pointer != '\n')
2546 ++input_line_pointer;
2547
2548 saved_char = *input_line_pointer;
2549 *input_line_pointer = 0;
2550
2551 if (!*name)
2552 as_bad (_("invalid syntax for .unreq directive"));
2553 else
2554 {
2555 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2556 name);
2557
2558 if (!reg)
2559 as_bad (_("unknown register alias '%s'"), name);
2560 else if (reg->builtin)
2561 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2562 name);
2563 else
2564 {
2565 char * p;
2566 char * nbuf;
2567
2568 hash_delete (arm_reg_hsh, name, FALSE);
2569 free ((char *) reg->name);
2570 if (reg->neon)
2571 free (reg->neon);
2572 free (reg);
2573
2574 /* Also locate the all upper case and all lower case versions.
2575 Do not complain if we cannot find one or the other as it
2576 was probably deleted above. */
2577
2578 nbuf = strdup (name);
2579 for (p = nbuf; *p; p++)
2580 *p = TOUPPER (*p);
2581 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2582 if (reg)
2583 {
2584 hash_delete (arm_reg_hsh, nbuf, FALSE);
2585 free ((char *) reg->name);
2586 if (reg->neon)
2587 free (reg->neon);
2588 free (reg);
2589 }
2590
2591 for (p = nbuf; *p; p++)
2592 *p = TOLOWER (*p);
2593 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2594 if (reg)
2595 {
2596 hash_delete (arm_reg_hsh, nbuf, FALSE);
2597 free ((char *) reg->name);
2598 if (reg->neon)
2599 free (reg->neon);
2600 free (reg);
2601 }
2602
2603 free (nbuf);
2604 }
2605 }
2606
2607 *input_line_pointer = saved_char;
2608 demand_empty_rest_of_line ();
2609 }
2610
2611 /* Directives: Instruction set selection. */
2612
2613 #ifdef OBJ_ELF
2614 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2615 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2617 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2618
2619 /* Create a new mapping symbol for the transition to STATE. */
2620
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the conventional AAELF mapping-symbol name for STATE.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Code symbols ($a/$t) also carry the ARM/Thumb and interworking
     attributes; data symbols ($d) need neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Replace any symbol previously recorded at this exact offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2694
2695 /* We must sometimes convert a region marked as code to data during
2696 code alignment, if an odd number of bytes have to be padded. The
2697 code mapping symbol is pushed to an aligned address. */
2698
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding itself as data, then resume the code state
     immediately after the BYTES of padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2721
2722 static void mapping_state_2 (enum mstate state, int max_chars);
2723
2724 /* Set the mapping state to STATE. Only call this when about to
2725 emit some STATE bytes to the file. */
2726
2727 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2760
2761 /* Same as mapping_state, but MAX_CHARS bytes have already been
2762 allocated. Put the mapping symbol that far back. */
2763
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (BFD-backed) sections carry mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Bytes were emitted before the first instruction of this
	 section; mark them as data retroactively.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2790 #undef TRANSITION
2791 #else
2792 #define mapping_state(x) ((void)0)
2793 #define mapping_state_2(x, y) ((void)0)
2794 #endif
2795
2796 /* Find the real, Thumb encoded start of a Thumb function. */
2797
2798 #ifdef OBJ_COFF
2799 static symbolS *
2800 find_real_start (symbolS * symbolP)
2801 {
2802 char * real_start;
2803 const char * name = S_GET_NAME (symbolP);
2804 symbolS * new_target;
2805
2806 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2807 #define STUB_NAME ".real_start_of"
2808
2809 if (name == NULL)
2810 abort ();
2811
2812 /* The compiler may generate BL instructions to local labels because
2813 it needs to perform a branch to a far away location. These labels
2814 do not have a corresponding ".real_start_of" label. We check
2815 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2816 the ".real_start_of" convention for nonlocal branches. */
2817 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2818 return symbolP;
2819
2820 real_start = concat (STUB_NAME, name, NULL);
2821 new_target = symbol_find (real_start);
2822 free (real_start);
2823
2824 if (new_target == NULL)
2825 {
2826 as_warn (_("Failed to find real start of function: %s\n"), name);
2827 new_target = symbolP;
2828 }
2829
2830 return new_target;
2831 }
2832 #endif
2833
2834 static void
2835 opcode_select (int width)
2836 {
2837 switch (width)
2838 {
2839 case 16:
2840 if (! thumb_mode)
2841 {
2842 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2843 as_bad (_("selected processor does not support THUMB opcodes"));
2844
2845 thumb_mode = 1;
2846 /* No need to force the alignment, since we will have been
2847 coming from ARM mode, which is word-aligned. */
2848 record_alignment (now_seg, 1);
2849 }
2850 break;
2851
2852 case 32:
2853 if (thumb_mode)
2854 {
2855 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2856 as_bad (_("selected processor does not support ARM opcodes"));
2857
2858 thumb_mode = 0;
2859
2860 if (!need_pass_2)
2861 frag_align (2, 0, 0);
2862
2863 record_alignment (now_seg, 1);
2864 }
2865 break;
2866
2867 default:
2868 as_bad (_("invalid instruction size selected (%d)"), width);
2869 }
2870 }
2871
/* Implement the .arm directive: select 32-bit ARM instruction mode.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2878
/* Implement the .thumb directive: select 16-bit Thumb instruction mode.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2885
2886 static void
2887 s_code (int unused ATTRIBUTE_UNUSED)
2888 {
2889 int temp;
2890
2891 temp = get_absolute_expression ();
2892 switch (temp)
2893 {
2894 case 16:
2895 case 32:
2896 opcode_select (temp);
2897 break;
2898
2899 default:
2900 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2901 }
2902 }
2903
/* Implement the .force_thumb directive: switch to Thumb mode even on
   processors without Thumb support.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks the forced state.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2920
/* Implement the .thumb_func directive: switch to Thumb mode and mark
   the next label as the start of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2930
2931 /* Perform a .set directive, but also mark the alias as
2932 being a thumb function. */
2933
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate the name so it prints cleanly, then put
	 the delimiter back before abandoning the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the symbol, or create it if it does not yet exist.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* For .thumb_equ (EQUIV nonzero), redefinition is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it, as .set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3019
3020 /* Directives: Mode selection. */
3021
3022 /* .syntax [unified|divided] - choose the new unified syntax
3023 (same for Arm and Thumb encoding, modulo slight differences in what
3024 can be represented) or the old divergent syntax for each mode. */
3025 static void
3026 s_syntax (int unused ATTRIBUTE_UNUSED)
3027 {
3028 char *name, delim;
3029
3030 delim = get_symbol_name (& name);
3031
3032 if (!strcasecmp (name, "unified"))
3033 unified_syntax = TRUE;
3034 else if (!strcasecmp (name, "divided"))
3035 unified_syntax = FALSE;
3036 else
3037 {
3038 as_bad (_("unrecognized syntax mode \"%s\""), name);
3039 return;
3040 }
3041 (void) restore_line_pointer (delim);
3042 demand_empty_rest_of_line ();
3043 }
3044
3045 /* Directives: sectioning and alignment. */
3046
/* Implement the .bss directive: switch to the bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3059
/* Implement the .even directive: pad to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3071
3072 /* Directives: CodeComposer Studio. */
3073
3074 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* .ref is accepted (and ignored) only under CodeComposer syntax.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3083
3084 /* If name is not NULL, then it is used for marking the beginning of a
3085 function, whereas if it is NULL then it means the function end. */
3086 static void
3087 asmfunc_debug (const char * name)
3088 {
3089 static const char * last_name = NULL;
3090
3091 if (name != NULL)
3092 {
3093 gas_assert (last_name == NULL);
3094 last_name = name;
3095
3096 if (debug_type == DEBUG_STABS)
3097 stabs_generate_asm_func (name, name);
3098 }
3099 else
3100 {
3101 gas_assert (last_name != NULL);
3102
3103 if (debug_type == DEBUG_STABS)
3104 stabs_generate_asm_endfunc (last_name, last_name);
3105
3106 last_name = NULL;
3107 }
3108 }
3109
/* Implement .asmfunc (CodeComposer Studio): begin an assembly function,
   advancing the state machine to wait for the function-name label.  */

static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3134
/* Implement .endasmfunc (CodeComposer Studio): close the current
   assembly function and emit its end-of-function debug info.  */

static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  /* NULL marks the function end to asmfunc_debug.  */
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3160
/* Implement .def (CodeComposer Studio): treated like .global.  */

static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3169
3170 /* Directives: Literal pools. */
3171
3172 static literal_pool *
3173 find_literal_pool (void)
3174 {
3175 literal_pool * pool;
3176
3177 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3178 {
3179 if (pool->section == now_seg
3180 && pool->sub_section == now_subseg)
3181 break;
3182 }
3183
3184 return pool;
3185 }
3186
3187 static literal_pool *
3188 find_or_make_literal_pool (void)
3189 {
3190 /* Next literal pool ID number. */
3191 static unsigned int latest_pool_num = 1;
3192 literal_pool * pool;
3193
3194 pool = find_literal_pool ();
3195
3196 if (pool == NULL)
3197 {
3198 /* Create a new pool. */
3199 pool = XNEW (literal_pool);
3200 if (! pool)
3201 return NULL;
3202
3203 pool->next_free_entry = 0;
3204 pool->section = now_seg;
3205 pool->sub_section = now_subseg;
3206 pool->next = list_of_pools;
3207 pool->symbol = NULL;
3208 pool->alignment = 2;
3209
3210 /* Add it to the list. */
3211 list_of_pools = pool;
3212 }
3213
3214 /* New pools, and emptied pools, will have a NULL symbol. */
3215 if (pool->symbol == NULL)
3216 {
3217 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3218 (valueT) 0, &zero_address_frag);
3219 pool->id = latest_pool_num ++;
3220 }
3221
3222 /* Done. */
3223 return pool;
3224 }
3225
3226 /* Add the literal in the global 'inst'
3227 structure to the relevant literal pool. */
3228
3229 static int
3230 add_to_lit_pool (unsigned int nbytes)
3231 {
3232 #define PADDING_SLOT 0x1
3233 #define LIT_ENTRY_SIZE_MASK 0xFF
3234 literal_pool * pool;
3235 unsigned int entry, pool_size = 0;
3236 bfd_boolean padding_slot_p = FALSE;
3237 unsigned imm1 = 0;
3238 unsigned imm2 = 0;
3239
3240 if (nbytes == 8)
3241 {
3242 imm1 = inst.operands[1].imm;
3243 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3244 : inst.reloc.exp.X_unsigned ? 0
3245 : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3246 if (target_big_endian)
3247 {
3248 imm1 = imm2;
3249 imm2 = inst.operands[1].imm;
3250 }
3251 }
3252
3253 pool = find_or_make_literal_pool ();
3254
3255 /* Check if this literal value is already in the pool. */
3256 for (entry = 0; entry < pool->next_free_entry; entry ++)
3257 {
3258 if (nbytes == 4)
3259 {
3260 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3261 && (inst.reloc.exp.X_op == O_constant)
3262 && (pool->literals[entry].X_add_number
3263 == inst.reloc.exp.X_add_number)
3264 && (pool->literals[entry].X_md == nbytes)
3265 && (pool->literals[entry].X_unsigned
3266 == inst.reloc.exp.X_unsigned))
3267 break;
3268
3269 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3270 && (inst.reloc.exp.X_op == O_symbol)
3271 && (pool->literals[entry].X_add_number
3272 == inst.reloc.exp.X_add_number)
3273 && (pool->literals[entry].X_add_symbol
3274 == inst.reloc.exp.X_add_symbol)
3275 && (pool->literals[entry].X_op_symbol
3276 == inst.reloc.exp.X_op_symbol)
3277 && (pool->literals[entry].X_md == nbytes))
3278 break;
3279 }
3280 else if ((nbytes == 8)
3281 && !(pool_size & 0x7)
3282 && ((entry + 1) != pool->next_free_entry)
3283 && (pool->literals[entry].X_op == O_constant)
3284 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3285 && (pool->literals[entry].X_unsigned
3286 == inst.reloc.exp.X_unsigned)
3287 && (pool->literals[entry + 1].X_op == O_constant)
3288 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3289 && (pool->literals[entry + 1].X_unsigned
3290 == inst.reloc.exp.X_unsigned))
3291 break;
3292
3293 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3294 if (padding_slot_p && (nbytes == 4))
3295 break;
3296
3297 pool_size += 4;
3298 }
3299
3300 /* Do we need to create a new entry? */
3301 if (entry == pool->next_free_entry)
3302 {
3303 if (entry >= MAX_LITERAL_POOL_SIZE)
3304 {
3305 inst.error = _("literal pool overflow");
3306 return FAIL;
3307 }
3308
3309 if (nbytes == 8)
3310 {
3311 /* For 8-byte entries, we align to an 8-byte boundary,
3312 and split it into two 4-byte entries, because on 32-bit
3313 host, 8-byte constants are treated as big num, thus
3314 saved in "generic_bignum" which will be overwritten
3315 by later assignments.
3316
3317 We also need to make sure there is enough space for
3318 the split.
3319
3320 We also check to make sure the literal operand is a
3321 constant number. */
3322 if (!(inst.reloc.exp.X_op == O_constant
3323 || inst.reloc.exp.X_op == O_big))
3324 {
3325 inst.error = _("invalid type for literal pool");
3326 return FAIL;
3327 }
3328 else if (pool_size & 0x7)
3329 {
3330 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3331 {
3332 inst.error = _("literal pool overflow");
3333 return FAIL;
3334 }
3335
3336 pool->literals[entry] = inst.reloc.exp;
3337 pool->literals[entry].X_op = O_constant;
3338 pool->literals[entry].X_add_number = 0;
3339 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3340 pool->next_free_entry += 1;
3341 pool_size += 4;
3342 }
3343 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3344 {
3345 inst.error = _("literal pool overflow");
3346 return FAIL;
3347 }
3348
3349 pool->literals[entry] = inst.reloc.exp;
3350 pool->literals[entry].X_op = O_constant;
3351 pool->literals[entry].X_add_number = imm1;
3352 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3353 pool->literals[entry++].X_md = 4;
3354 pool->literals[entry] = inst.reloc.exp;
3355 pool->literals[entry].X_op = O_constant;
3356 pool->literals[entry].X_add_number = imm2;
3357 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3358 pool->literals[entry].X_md = 4;
3359 pool->alignment = 3;
3360 pool->next_free_entry += 1;
3361 }
3362 else
3363 {
3364 pool->literals[entry] = inst.reloc.exp;
3365 pool->literals[entry].X_md = 4;
3366 }
3367
3368 #ifdef OBJ_ELF
3369 /* PR ld/12974: Record the location of the first source line to reference
3370 this entry in the literal pool. If it turns out during linking that the
3371 symbol does not exist we will be able to give an accurate line number for
3372 the (first use of the) missing reference. */
3373 if (debug_type == DEBUG_DWARF2)
3374 dwarf2_where (pool->locs + entry);
3375 #endif
3376 pool->next_free_entry += 1;
3377 }
3378 else if (padding_slot_p)
3379 {
3380 pool->literals[entry] = inst.reloc.exp;
3381 pool->literals[entry].X_md = nbytes;
3382 }
3383
3384 inst.reloc.exp.X_op = O_symbol;
3385 inst.reloc.exp.X_add_number = pool_size;
3386 inst.reloc.exp.X_add_symbol = pool->symbol;
3387
3388 return SUCCESS;
3389 }
3390
/* Hook called when a label without a trailing colon is seen.  Used by
   the CodeComposer (CCS) syntax support: when we are waiting for the
   name of an .asmfunc, validate and record the label.  Returns TRUE if
   the label is acceptable, FALSE otherwise.  */

bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* input_line_pointer points just past the label; scan backwards
	 to the previous end-of-line character to find its start.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      /* Labels starting with '.' may not name an asmfunc.  */
      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      /* Next interesting event is the matching .endasmfunc.  */
      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3416
3417 /* Can't use symbol_new here, so have to create a symbol and then at
3418 a later date assign it a value. That's what these functions do. */
3419
static void
symbol_locate (symbolS * symbolP,
	       const char * name, /* It is copied, the caller can modify. */
	       segT segment, /* Segment identifier (SEG_<something>). */
	       valueT valu, /* Symbol value. */
	       fragS * frag) /* Associated fragment. */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns its own,
     permanent storage.  */
  name_length = strlen (name) + 1; /* +1 for \0. */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object-format and target back ends see the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3467
/* Implement the .ltorg directive: dump the current literal pool (if
   any) at the present location, define its label, and reset the pool
   to empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data follows code, so emit a data mapping symbol here.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The embedded \002 byte keeps the generated name distinct from any
     symbol a user could write in source.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the pool's symbol at the current location; loads that
     referenced the pool resolve through this symbol.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3519
3520 #ifdef OBJ_ELF
3521 /* Forward declarations for functions below, in the MD interface
3522 section. */
3523 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3524 static valueT create_unwind_entry (int);
3525 static void start_unwind_section (const segT, int);
3526 static void add_unwind_opcode (valueT, int);
3527 static void flush_pending_unwind (void);
3528
3529 /* Directives: Data. */
3530
/* Implement .word/.short etc. for ELF: like the generic cons but each
   expression may carry a relocation suffix such as (GOT) or (plt),
   parsed by parse_reloc.  NBYTES is the size of each emitted datum.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the raw input, slide the text after the suffix
		     over the suffix, re-parse the whole expression, then
		     restore the buffer.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Relocate the low-order SIZE bytes of the datum.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3625
3626 /* Emit an expression containing a 32-bit thumb instruction.
3627 Implementation based on put_thumb32_insn. */
3628
3629 static void
3630 emit_thumb32_expr (expressionS * exp)
3631 {
3632 expressionS exp_high = *exp;
3633
3634 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3635 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3636 exp->X_add_number &= 0xffff;
3637 emit_expr (exp, (unsigned int) THUMB_SIZE);
3638 }
3639
3640 /* Guess the instruction size based on the opcode. */
3641
/* Guess the size in bytes of a Thumb instruction from its opcode
   value: 2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when the
   value falls in neither range and the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;
  if (value >= 0xe8000000u)
    return 4;
  return 0;
}
3652
3653 static bfd_boolean
3654 emit_insn (expressionS *exp, int nbytes)
3655 {
3656 int size = 0;
3657
3658 if (exp->X_op == O_constant)
3659 {
3660 size = nbytes;
3661
3662 if (size == 0)
3663 size = thumb_insn_size (exp->X_add_number);
3664
3665 if (size != 0)
3666 {
3667 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3668 {
3669 as_bad (_(".inst.n operand too big. "\
3670 "Use .inst.w instead"));
3671 size = 0;
3672 }
3673 else
3674 {
3675 if (now_it.state == AUTOMATIC_IT_BLOCK)
3676 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3677 else
3678 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3679
3680 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3681 emit_thumb32_expr (exp);
3682 else
3683 emit_expr (exp, (unsigned int) size);
3684
3685 it_fsm_post_encode ();
3686 }
3687 }
3688 else
3689 as_bad (_("cannot determine Thumb instruction size. " \
3690 "Use .inst.n/.inst.w instead"));
3691 }
3692 else
3693 as_bad (_("constant expression required"));
3694
3695 return (size != 0);
3696 }
3697
3698 /* Like s_arm_elf_cons but do not use md_cons_align and
3699 set the mapping state to MAP_ARM/MAP_THUMB. */
3700
3701 static void
3702 s_arm_elf_inst (int nbytes)
3703 {
3704 if (is_it_end_of_statement ())
3705 {
3706 demand_empty_rest_of_line ();
3707 return;
3708 }
3709
3710 /* Calling mapping_state () here will not change ARM/THUMB,
3711 but will ensure not to be in DATA state. */
3712
3713 if (thumb_mode)
3714 mapping_state (MAP_THUMB);
3715 else
3716 {
3717 if (nbytes != 0)
3718 {
3719 as_bad (_("width suffixes are invalid in ARM mode"));
3720 ignore_rest_of_line ();
3721 return;
3722 }
3723
3724 nbytes = 4;
3725
3726 mapping_state (MAP_ARM);
3727 }
3728
3729 do
3730 {
3731 expressionS exp;
3732
3733 expression (& exp);
3734
3735 if (! emit_insn (& exp, nbytes))
3736 {
3737 ignore_rest_of_line ();
3738 return;
3739 }
3740 }
3741 while (*input_line_pointer++ == ',');
3742
3743 /* Put terminator back into stream. */
3744 input_line_pointer --;
3745 demand_empty_rest_of_line ();
3746 }
3747
3748 /* Parse a .rel31 directive. */
3749
/* Parse a .rel31 directive: emit a 32-bit word whose top bit comes
   from the first operand (0 or 1) and whose low 31 bits are filled in
   by a pc-relative BFD_RELOC_ARM_PREL31 against the second operand.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the stored high bit.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Pre-store the high bit; the relocation supplies the low 31 bits
     (pcrel argument is 1).  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3787
3788 /* Directives: AEABI stack-unwind tables. */
3789
3790 /* Parse an unwind_fnstart directive. Simply records the current location. */
3791
3792 static void
3793 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3794 {
3795 demand_empty_rest_of_line ();
3796 if (unwind.proc_start)
3797 {
3798 as_bad (_("duplicate .fnstart directive"));
3799 return;
3800 }
3801
3802 /* Mark the start of the function. */
3803 unwind.proc_start = expr_build_dot ();
3804
3805 /* Reset the rest of the unwind info. */
3806 unwind.opcode_count = 0;
3807 unwind.table_entry = NULL;
3808 unwind.personality_routine = NULL;
3809 unwind.personality_index = -1;
3810 unwind.frame_size = 0;
3811 unwind.fp_offset = 0;
3812 unwind.fp_reg = REG_SP;
3813 unwind.fp_used = 0;
3814 unwind.sp_restored = 0;
3815 }
3816
3817
3818 /* Parse a handlerdata directive. Creates the exception handling table entry
3819 for the function. */
3820
3821 static void
3822 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3823 {
3824 demand_empty_rest_of_line ();
3825 if (!unwind.proc_start)
3826 as_bad (MISSING_FNSTART);
3827
3828 if (unwind.table_entry)
3829 as_bad (_("duplicate .handlerdata directive"));
3830
3831 create_unwind_entry (1);
3832 }
3833
3834 /* Parse an unwind_fnend directive. Generates the index table entry. */
3835
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the whole entry can
     be expressed inline in the index table.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_now (8);
  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix creates the reference without
	 emitting any data.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3903
3904
3905 /* Parse an unwind_cantunwind directive. */
3906
3907 static void
3908 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3909 {
3910 demand_empty_rest_of_line ();
3911 if (!unwind.proc_start)
3912 as_bad (MISSING_FNSTART);
3913
3914 if (unwind.personality_routine || unwind.personality_index != -1)
3915 as_bad (_("personality routine specified for cantunwind frame"));
3916
3917 unwind.personality_index = -2;
3918 }
3919
3920
3921 /* Parse a personalityindex directive. */
3922
3923 static void
3924 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3925 {
3926 expressionS exp;
3927
3928 if (!unwind.proc_start)
3929 as_bad (MISSING_FNSTART);
3930
3931 if (unwind.personality_routine || unwind.personality_index != -1)
3932 as_bad (_("duplicate .personalityindex directive"));
3933
3934 expression (&exp);
3935
3936 if (exp.X_op != O_constant
3937 || exp.X_add_number < 0 || exp.X_add_number > 15)
3938 {
3939 as_bad (_("bad personality routine number"));
3940 ignore_rest_of_line ();
3941 return;
3942 }
3943
3944 unwind.personality_index = exp.X_add_number;
3945
3946 demand_empty_rest_of_line ();
3947 }
3948
3949
3950 /* Parse a personality directive. */
3951
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name clobbers the character after the name with a NUL
     (returned in C) so NAME can be used as a C string; restore it once
     the symbol has been interned.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3971
3972
3973 /* Parse a directive saving core registers. */
3974
/* Parse a directive saving core registers.  Bit N of RANGE corresponds
   to register rN.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and pretend sp (bit 13) was saved in
	 place of ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4048
4049
4050 /* Parse a directive saving FPA registers. */
4051
4052 static void
4053 s_arm_unwind_save_fpa (int reg)
4054 {
4055 expressionS exp;
4056 int num_regs;
4057 valueT op;
4058
4059 /* Get Number of registers to transfer. */
4060 if (skip_past_comma (&input_line_pointer) != FAIL)
4061 expression (&exp);
4062 else
4063 exp.X_op = O_illegal;
4064
4065 if (exp.X_op != O_constant)
4066 {
4067 as_bad (_("expected , <constant>"));
4068 ignore_rest_of_line ();
4069 return;
4070 }
4071
4072 num_regs = exp.X_add_number;
4073
4074 if (num_regs < 1 || num_regs > 4)
4075 {
4076 as_bad (_("number of registers must be in the range [1:4]"));
4077 ignore_rest_of_line ();
4078 return;
4079 }
4080
4081 demand_empty_rest_of_line ();
4082
4083 if (reg == 4)
4084 {
4085 /* Short form. */
4086 op = 0xb4 | (num_regs - 1);
4087 add_unwind_opcode (op, 1);
4088 }
4089 else
4090 {
4091 /* Long form. */
4092 op = 0xc800 | (reg << 4) | (num_regs - 1);
4093 add_unwind_opcode (op, 2);
4094 }
4095 unwind.frame_size += num_regs * 12;
4096 }
4097
4098
4099 /* Parse a directive saving VFP registers for ARMv6 and above. */
4100
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list may span the D0-D15 / D16-D31 boundary; in that case
   two opcodes are emitted, one for each half.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4147
4148
4149 /* Parse a directive saving VFP registers for pre-ARMv6. */
4150
4151 static void
4152 s_arm_unwind_save_vfp (void)
4153 {
4154 int count;
4155 unsigned int reg;
4156 valueT op;
4157
4158 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4159 if (count == FAIL)
4160 {
4161 as_bad (_("expected register list"));
4162 ignore_rest_of_line ();
4163 return;
4164 }
4165
4166 demand_empty_rest_of_line ();
4167
4168 if (reg == 8)
4169 {
4170 /* Short form. */
4171 op = 0xb8 | (count - 1);
4172 add_unwind_opcode (op, 1);
4173 }
4174 else
4175 {
4176 /* Long form. */
4177 op = 0xb300 | (reg << 4) | (count - 1);
4178 add_unwind_opcode (op, 2);
4179 }
4180 unwind.frame_size += count * 8 + 4;
4181 }
4182
4183
4184 /* Parse a directive saving iWMMXt data registers. */
4185
4186 static void
4187 s_arm_unwind_save_mmxwr (void)
4188 {
4189 int reg;
4190 int hi_reg;
4191 int i;
4192 unsigned mask = 0;
4193 valueT op;
4194
4195 if (*input_line_pointer == '{')
4196 input_line_pointer++;
4197
4198 do
4199 {
4200 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4201
4202 if (reg == FAIL)
4203 {
4204 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4205 goto error;
4206 }
4207
4208 if (mask >> reg)
4209 as_tsktsk (_("register list not in ascending order"));
4210 mask |= 1 << reg;
4211
4212 if (*input_line_pointer == '-')
4213 {
4214 input_line_pointer++;
4215 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4216 if (hi_reg == FAIL)
4217 {
4218 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4219 goto error;
4220 }
4221 else if (reg >= hi_reg)
4222 {
4223 as_bad (_("bad register range"));
4224 goto error;
4225 }
4226 for (; reg < hi_reg; reg++)
4227 mask |= 1 << reg;
4228 }
4229 }
4230 while (skip_past_comma (&input_line_pointer) != FAIL);
4231
4232 skip_past_char (&input_line_pointer, '}');
4233
4234 demand_empty_rest_of_line ();
4235
4236 /* Generate any deferred opcodes because we're going to be looking at
4237 the list. */
4238 flush_pending_unwind ();
4239
4240 for (i = 0; i < 16; i++)
4241 {
4242 if (mask & (1 << i))
4243 unwind.frame_size += 8;
4244 }
4245
4246 /* Attempt to combine with a previous opcode. We do this because gcc
4247 likes to output separate unwind directives for a single block of
4248 registers. */
4249 if (unwind.opcode_count > 0)
4250 {
4251 i = unwind.opcodes[unwind.opcode_count - 1];
4252 if ((i & 0xf8) == 0xc0)
4253 {
4254 i &= 7;
4255 /* Only merge if the blocks are contiguous. */
4256 if (i < 6)
4257 {
4258 if ((mask & 0xfe00) == (1 << 9))
4259 {
4260 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4261 unwind.opcode_count--;
4262 }
4263 }
4264 else if (i == 6 && unwind.opcode_count >= 2)
4265 {
4266 i = unwind.opcodes[unwind.opcode_count - 2];
4267 reg = i >> 4;
4268 i &= 0xf;
4269
4270 op = 0xffff << (reg - 1);
4271 if (reg > 0
4272 && ((mask & op) == (1u << (reg - 1))))
4273 {
4274 op = (1 << (reg + i + 1)) - 1;
4275 op &= ~((1 << reg) - 1);
4276 mask |= op;
4277 unwind.opcode_count -= 2;
4278 }
4279 }
4280 }
4281 }
4282
4283 hi_reg = 15;
4284 /* We want to generate opcodes in the order the registers have been
4285 saved, ie. descending order. */
4286 for (reg = 15; reg >= -1; reg--)
4287 {
4288 /* Save registers in blocks. */
4289 if (reg < 0
4290 || !(mask & (1 << reg)))
4291 {
4292 /* We found an unsaved reg. Generate opcodes to save the
4293 preceding block. */
4294 if (reg != hi_reg)
4295 {
4296 if (reg == 9)
4297 {
4298 /* Short form. */
4299 op = 0xc0 | (hi_reg - 10);
4300 add_unwind_opcode (op, 1);
4301 }
4302 else
4303 {
4304 /* Long form. */
4305 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4306 add_unwind_opcode (op, 2);
4307 }
4308 }
4309 hi_reg = reg - 1;
4310 }
4311 }
4312
4313 return;
4314 error:
4315 ignore_rest_of_line ();
4316 }
4317
4318 static void
4319 s_arm_unwind_save_mmxwcg (void)
4320 {
4321 int reg;
4322 int hi_reg;
4323 unsigned mask = 0;
4324 valueT op;
4325
4326 if (*input_line_pointer == '{')
4327 input_line_pointer++;
4328
4329 skip_whitespace (input_line_pointer);
4330
4331 do
4332 {
4333 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4334
4335 if (reg == FAIL)
4336 {
4337 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4338 goto error;
4339 }
4340
4341 reg -= 8;
4342 if (mask >> reg)
4343 as_tsktsk (_("register list not in ascending order"));
4344 mask |= 1 << reg;
4345
4346 if (*input_line_pointer == '-')
4347 {
4348 input_line_pointer++;
4349 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4350 if (hi_reg == FAIL)
4351 {
4352 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4353 goto error;
4354 }
4355 else if (reg >= hi_reg)
4356 {
4357 as_bad (_("bad register range"));
4358 goto error;
4359 }
4360 for (; reg < hi_reg; reg++)
4361 mask |= 1 << reg;
4362 }
4363 }
4364 while (skip_past_comma (&input_line_pointer) != FAIL);
4365
4366 skip_past_char (&input_line_pointer, '}');
4367
4368 demand_empty_rest_of_line ();
4369
4370 /* Generate any deferred opcodes because we're going to be looking at
4371 the list. */
4372 flush_pending_unwind ();
4373
4374 for (reg = 0; reg < 16; reg++)
4375 {
4376 if (mask & (1 << reg))
4377 unwind.frame_size += 4;
4378 }
4379 op = 0xc700 | mask;
4380 add_unwind_opcode (op, 2);
4381 return;
4382 error:
4383 ignore_rest_of_line ();
4384 }
4385
4386
4387 /* Parse an unwind_save directive.
4388 If the argument is non-zero, this is a .vsave directive. */
4389
4390 static void
4391 s_arm_unwind_save (int arch_v6)
4392 {
4393 char *peek;
4394 struct reg_entry *reg;
4395 bfd_boolean had_brace = FALSE;
4396
4397 if (!unwind.proc_start)
4398 as_bad (MISSING_FNSTART);
4399
4400 /* Figure out what sort of save we have. */
4401 peek = input_line_pointer;
4402
4403 if (*peek == '{')
4404 {
4405 had_brace = TRUE;
4406 peek++;
4407 }
4408
4409 reg = arm_reg_parse_multi (&peek);
4410
4411 if (!reg)
4412 {
4413 as_bad (_("register expected"));
4414 ignore_rest_of_line ();
4415 return;
4416 }
4417
4418 switch (reg->type)
4419 {
4420 case REG_TYPE_FN:
4421 if (had_brace)
4422 {
4423 as_bad (_("FPA .unwind_save does not take a register list"));
4424 ignore_rest_of_line ();
4425 return;
4426 }
4427 input_line_pointer = peek;
4428 s_arm_unwind_save_fpa (reg->number);
4429 return;
4430
4431 case REG_TYPE_RN:
4432 s_arm_unwind_save_core ();
4433 return;
4434
4435 case REG_TYPE_VFD:
4436 if (arch_v6)
4437 s_arm_unwind_save_vfp_armv6 ();
4438 else
4439 s_arm_unwind_save_vfp ();
4440 return;
4441
4442 case REG_TYPE_MMXWR:
4443 s_arm_unwind_save_mmxwr ();
4444 return;
4445
4446 case REG_TYPE_MMXWCG:
4447 s_arm_unwind_save_mmxwcg ();
4448 return;
4449
4450 default:
4451 as_bad (_(".unwind_save does not support this kind of register"));
4452 ignore_rest_of_line ();
4453 }
4454 }
4455
4456
4457 /* Parse an unwind_movsp directive. */
4458
4459 static void
4460 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4461 {
4462 int reg;
4463 valueT op;
4464 int offset;
4465
4466 if (!unwind.proc_start)
4467 as_bad (MISSING_FNSTART);
4468
4469 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4470 if (reg == FAIL)
4471 {
4472 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4473 ignore_rest_of_line ();
4474 return;
4475 }
4476
4477 /* Optional constant. */
4478 if (skip_past_comma (&input_line_pointer) != FAIL)
4479 {
4480 if (immediate_for_directive (&offset) == FAIL)
4481 return;
4482 }
4483 else
4484 offset = 0;
4485
4486 demand_empty_rest_of_line ();
4487
4488 if (reg == REG_SP || reg == REG_PC)
4489 {
4490 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4491 return;
4492 }
4493
4494 if (unwind.fp_reg != REG_SP)
4495 as_bad (_("unexpected .unwind_movsp directive"));
4496
4497 /* Generate opcode to restore the value. */
4498 op = 0x90 | reg;
4499 add_unwind_opcode (op, 1);
4500
4501 /* Record the information for later. */
4502 unwind.fp_reg = reg;
4503 unwind.fp_offset = unwind.frame_size - offset;
4504 unwind.sp_restored = 1;
4505 }
4506
4507 /* Parse an unwind_pad directive. */
4508
4509 static void
4510 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4511 {
4512 int offset;
4513
4514 if (!unwind.proc_start)
4515 as_bad (MISSING_FNSTART);
4516
4517 if (immediate_for_directive (&offset) == FAIL)
4518 return;
4519
4520 if (offset & 3)
4521 {
4522 as_bad (_("stack increment must be multiple of 4"));
4523 ignore_rest_of_line ();
4524 return;
4525 }
4526
4527 /* Don't generate any opcodes, just record the details for later. */
4528 unwind.frame_size += offset;
4529 unwind.pending_offset += offset;
4530
4531 demand_empty_rest_of_line ();
4532 }
4533
4534 /* Parse an unwind_setfp directive. */
4535
4536 static void
4537 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4538 {
4539 int sp_reg;
4540 int fp_reg;
4541 int offset;
4542
4543 if (!unwind.proc_start)
4544 as_bad (MISSING_FNSTART);
4545
4546 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4547 if (skip_past_comma (&input_line_pointer) == FAIL)
4548 sp_reg = FAIL;
4549 else
4550 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4551
4552 if (fp_reg == FAIL || sp_reg == FAIL)
4553 {
4554 as_bad (_("expected <reg>, <reg>"));
4555 ignore_rest_of_line ();
4556 return;
4557 }
4558
4559 /* Optional constant. */
4560 if (skip_past_comma (&input_line_pointer) != FAIL)
4561 {
4562 if (immediate_for_directive (&offset) == FAIL)
4563 return;
4564 }
4565 else
4566 offset = 0;
4567
4568 demand_empty_rest_of_line ();
4569
4570 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4571 {
4572 as_bad (_("register must be either sp or set by a previous"
4573 "unwind_movsp directive"));
4574 return;
4575 }
4576
4577 /* Don't generate any opcodes, just record the information for later. */
4578 unwind.fp_reg = fp_reg;
4579 unwind.fp_used = 1;
4580 if (sp_reg == REG_SP)
4581 unwind.fp_offset = unwind.frame_size - offset;
4582 else
4583 unwind.fp_offset -= offset;
4584 }
4585
4586 /* Parse an unwind_raw directive. */
4587
4588 static void
4589 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4590 {
4591 expressionS exp;
4592 /* This is an arbitrary limit. */
4593 unsigned char op[16];
4594 int count;
4595
4596 if (!unwind.proc_start)
4597 as_bad (MISSING_FNSTART);
4598
4599 expression (&exp);
4600 if (exp.X_op == O_constant
4601 && skip_past_comma (&input_line_pointer) != FAIL)
4602 {
4603 unwind.frame_size += exp.X_add_number;
4604 expression (&exp);
4605 }
4606 else
4607 exp.X_op = O_illegal;
4608
4609 if (exp.X_op != O_constant)
4610 {
4611 as_bad (_("expected <offset>, <opcode>"));
4612 ignore_rest_of_line ();
4613 return;
4614 }
4615
4616 count = 0;
4617
4618 /* Parse the opcode. */
4619 for (;;)
4620 {
4621 if (count >= 16)
4622 {
4623 as_bad (_("unwind opcode too long"));
4624 ignore_rest_of_line ();
4625 }
4626 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4627 {
4628 as_bad (_("invalid unwind opcode"));
4629 ignore_rest_of_line ();
4630 return;
4631 }
4632 op[count++] = exp.X_add_number;
4633
4634 /* Parse the next byte. */
4635 if (skip_past_comma (&input_line_pointer) == FAIL)
4636 break;
4637
4638 expression (&exp);
4639 }
4640
4641 /* Add the opcode bytes in reverse order. */
4642 while (count--)
4643 add_unwind_opcode (op[count], 1);
4644
4645 demand_empty_rest_of_line ();
4646 }
4647
4648
4649 /* Parse a .eabi_attribute directive. */
4650
4651 static void
4652 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4653 {
4654 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4655
4656 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4657 attributes_set_explicitly[tag] = 1;
4658 }
4659
/* Emit a tls fix for the symbol: handle the .tlsdescseq directive by
   attaching a BFD_RELOC_ARM[_THM]_TLS_DESCSEQ fixup (picked by the
   current thumb_mode) at the current output position.  No data is
   emitted; the fix merely labels the code for the linker's TLS
   descriptor sequence relaxation.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fix is anchored at the next free byte of the current frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4683 #endif /* OBJ_ELF */
4684
/* Forward declarations for directive handlers defined later in this
   file; they are referenced by md_pseudo_table below.  */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);
static void s_arm_arch_extension (int);
4690
4691 #ifdef TE_PE
4692
4693 static void
4694 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4695 {
4696 expressionS exp;
4697
4698 do
4699 {
4700 expression (&exp);
4701 if (exp.X_op == O_symbol)
4702 exp.X_op = O_secrel;
4703
4704 emit_expr (&exp, 4);
4705 }
4706 while (*input_line_pointer++ == ',');
4707
4708 input_line_pointer--;
4709 demand_empty_rest_of_line ();
4710 }
4711 #endif /* TE_PE */
4712
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func,	  0 },
  { "thumb_set",  s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* ARM EHABI exception-table (unwinding) directives; handlers are the
     s_arm_unwind_* functions defined earlier in this file.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4792 \f
4793 /* Parser functions used exclusively in instruction operands. */
4794
4795 /* Generic immediate-value read function for use in insn parsing.
4796 STR points to the beginning of the immediate (the leading #);
4797 VAL receives the value; if the value is outside [MIN, MAX]
4798 issue an error. PREFIX_OPT is true if the immediate prefix is
4799 optional. */
4800
4801 static int
4802 parse_immediate (char **str, int *val, int min, int max,
4803 bfd_boolean prefix_opt)
4804 {
4805 expressionS exp;
4806
4807 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4808 if (exp.X_op != O_constant)
4809 {
4810 inst.error = _("constant expression required");
4811 return FAIL;
4812 }
4813
4814 if (exp.X_add_number < min || exp.X_add_number > max)
4815 {
4816 inst.error = _("immediate value out of range");
4817 return FAIL;
4818 }
4819
4820 *val = exp.X_add_number;
4821 return SUCCESS;
4822 }
4823
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and — when the value does not fit in 32 bits — the high 32
   bits in .reg with .regisimm set.  Returns SUCCESS/FAIL; on success *STR
   is advanced past the immediate.  If IN_EXP is non-NULL the parsed
   expression is also stored there.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count of the bignum.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low and high 32-bit halves littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4896
/* Returns the pseudo-register number of an FPA immediate constant
   (the index of the matching entry in fp_values, plus 8), or FAIL if
   there isn't a valid constant here.  On success *STR is advanced
   past the constant; on failure it is left unchanged and inst.error
   is set.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not followed by end-of-line: undo the advance and keep
	     trying other spellings.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore input_line_pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4989
4990 /* Returns 1 if a number has "quarter-precision" float format
4991 0baBbbbbbc defgh000 00000000 00000000. */
4992
static int
is_quarter_float (unsigned imm)
{
  /* The mantissa extension bits (18:0) must all be zero, and the
     exponent field (bits 30:25) must be 0b011111 or 0b100000
     depending on bit 29, exactly as in the original test
     ((imm & 0x7e000000) ^ bs) == 0.  */
  unsigned expected_exp = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected_exp;
}
4999
5000
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and advances *IN past the constant
   if one is found; otherwise returns FALSE (possibly with *IN
   partially advanced).  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* min == max == 0, so only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to be how atof_generic flags a
     value whose significand littlenums are all zero (i.e. +0.0) —
     confirm against atof_generic's documentation.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5038
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.	 On SUCCESS the 32-bit single-precision
   bit pattern is stored in *IMMED and *CCP is advanced; on FAIL nothing
   is written.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan the token for a '.' or an exponent marker; without one the
	 operand is treated as an integer and rejected here.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept genuine quarter-precision patterns, plus +0.0/-0.0
	 (only the sign bit possibly set).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5102
/* Shift operands.  SHIFT_RRX is the amount-less RRX form, which is
   assimilated to ROR #0 in the encoding (see parse_shift below).  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic onto its kind; instances populate the
   arm_shift_hsh hash table consulted by parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5124
5125 /* Parse a <shift> specifier on an ARM data processing instruction.
5126 This has three forms:
5127
5128 (LSL|LSR|ASL|ASR|ROR) Rs
5129 (LSL|LSR|ASL|ASR|ROR) #imm
5130 RRX
5131
5132 Note that ASL is assimilated to LSL in the instruction encoding, and
5133 RRX to ROR #0 (which cannot be written as such). */
5134
5135 static int
5136 parse_shift (char **str, int i, enum parse_shift_mode mode)
5137 {
5138 const struct asm_shift_name *shift_name;
5139 enum shift_kind shift;
5140 char *s = *str;
5141 char *p = s;
5142 int reg;
5143
5144 for (p = *str; ISALPHA (*p); p++)
5145 ;
5146
5147 if (p == *str)
5148 {
5149 inst.error = _("shift expression expected");
5150 return FAIL;
5151 }
5152
5153 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5154 p - *str);
5155
5156 if (shift_name == NULL)
5157 {
5158 inst.error = _("shift expression expected");
5159 return FAIL;
5160 }
5161
5162 shift = shift_name->kind;
5163
5164 switch (mode)
5165 {
5166 case NO_SHIFT_RESTRICT:
5167 case SHIFT_IMMEDIATE: break;
5168
5169 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5170 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5171 {
5172 inst.error = _("'LSL' or 'ASR' required");
5173 return FAIL;
5174 }
5175 break;
5176
5177 case SHIFT_LSL_IMMEDIATE:
5178 if (shift != SHIFT_LSL)
5179 {
5180 inst.error = _("'LSL' required");
5181 return FAIL;
5182 }
5183 break;
5184
5185 case SHIFT_ASR_IMMEDIATE:
5186 if (shift != SHIFT_ASR)
5187 {
5188 inst.error = _("'ASR' required");
5189 return FAIL;
5190 }
5191 break;
5192
5193 default: abort ();
5194 }
5195
5196 if (shift != SHIFT_RRX)
5197 {
5198 /* Whitespace can appear here if the next thing is a bare digit. */
5199 skip_whitespace (p);
5200
5201 if (mode == NO_SHIFT_RESTRICT
5202 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5203 {
5204 inst.operands[i].imm = reg;
5205 inst.operands[i].immisreg = 1;
5206 }
5207 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5208 return FAIL;
5209 }
5210 inst.operands[i].shift_kind = shift;
5211 inst.operands[i].shifted = 1;
5212 *str = p;
5213 return SUCCESS;
5214 }
5215
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.	*/
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: must be an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified: VALUE is even, so VALUE << 7 places the
	 halved rotate count just above the 8-bit constant.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #imm with no rotation: leave validation to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5285
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  A code of 0 means the relocation is not
   available for that instruction class (callers report an error when
   they select a 0 entry).  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.	*/
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 } };				/* LDC.	 */
5385
5386 /* Given the address of a pointer pointing to the textual name of a group
5387 relocation as may appear in assembler source, attempt to find its details
5388 in group_reloc_table. The pointer will be updated to the character after
5389 the trailing colon. On failure, FAIL will be returned; SUCCESS
5390 otherwise. On success, *entry will be updated to point at the relevant
5391 group_reloc_table entry. */
5392
5393 static int
5394 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5395 {
5396 unsigned int i;
5397 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5398 {
5399 int length = strlen (group_reloc_table[i].name);
5400
5401 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5402 && (*str)[length] == ':')
5403 {
5404 *out = &group_reloc_table[i];
5405 *str += (length + 1);
5406 return SUCCESS;
5407 }
5408 }
5409
5410 return FAIL;
5411 }
5412
5413 /* Parse a <shifter_operand> for an ARM data processing instruction
5414 (as for parse_shifter_operand) where group relocations are allowed:
5415
5416 #<immediate>
5417 #<immediate>, <rotate>
5418 #:<group_reloc>:<expression>
5419 <Rm>
5420 <Rm>, <shift>
5421
5422 where <group_reloc> is one of the strings defined in group_reloc_table.
5423 The hashes are optional.
5424
5425 Everything else is as for parse_shifter_operand. */
5426
5427 static parse_operand_result
5428 parse_shifter_operand_group_reloc (char **str, int i)
5429 {
5430 /* Determine if we have the sequence of characters #: or just :
5431 coming next. If we do, then we check for a group relocation.
5432 If we don't, punt the whole lot to parse_shifter_operand. */
5433
5434 if (((*str)[0] == '#' && (*str)[1] == ':')
5435 || (*str)[0] == ':')
5436 {
5437 struct group_reloc_table_entry *entry;
5438
5439 if ((*str)[0] == '#')
5440 (*str) += 2;
5441 else
5442 (*str)++;
5443
5444 /* Try to parse a group relocation. Anything else is an error. */
5445 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5446 {
5447 inst.error = _("unknown group relocation");
5448 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5449 }
5450
5451 /* We now have the group relocation table entry corresponding to
5452 the name in the assembler source. Next, we parse the expression. */
5453 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5454 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5455
5456 /* Record the relocation type (always the ALU variant here). */
5457 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5458 gas_assert (inst.reloc.type != 0);
5459
5460 return PARSE_OPERAND_SUCCESS;
5461 }
5462 else
5463 return parse_shifter_operand (str, i) == SUCCESS
5464 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5465
5466 /* Never reached. */
5467 }
5468
5469 /* Parse a Neon alignment expression. Information is written to
5470 inst.operands[i]. We assume the initial ':' has been skipped.
5471
5472 align .imm = align << 8, .immisalign=1, .preind=0 */
5473 static parse_operand_result
5474 parse_neon_alignment (char **str, int i)
5475 {
5476 char *p = *str;
5477 expressionS exp;
5478
5479 my_get_expression (&exp, &p, GE_NO_PREFIX);
5480
5481 if (exp.X_op != O_constant)
5482 {
5483 inst.error = _("alignment must be constant");
5484 return PARSE_OPERAND_FAIL;
5485 }
5486
5487 inst.operands[i].imm = exp.X_add_number << 8;
5488 inst.operands[i].immisalign = 1;
5489 /* Alignments are not pre-indexes. */
5490 inst.operands[i].preind = 0;
5491
5492 *str = p;
5493 return PARSE_OPERAND_SUCCESS;
5494 }
5495
5496 /* Parse all forms of an ARM address expression. Information is written
5497 to inst.operands[i] and/or inst.reloc.
5498
5499 Preindexed addressing (.preind=1):
5500
5501 [Rn, #offset] .reg=Rn .reloc.exp=offset
5502 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5503 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5504 .shift_kind=shift .reloc.exp=shift_imm
5505
5506 These three may have a trailing ! which causes .writeback to be set also.
5507
5508 Postindexed addressing (.postind=1, .writeback=1):
5509
5510 [Rn], #offset .reg=Rn .reloc.exp=offset
5511 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5512 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5513 .shift_kind=shift .reloc.exp=shift_imm
5514
5515 Unindexed addressing (.preind=0, .postind=0):
5516
5517 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5518
5519 Other:
5520
5521 [Rn]{!} shorthand for [Rn,#0]{!}
5522 =immediate .isreg=0 .reloc.exp=immediate
5523 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5524
5525 It is the caller's responsibility to check for addressing modes not
5526 supported by the instruction, and to set inst.reloc.type. */
5527
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      /* "=immediate" form: literal-pool style constant.  */
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] or [Rn, +/-Rm, shift].  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.	 This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Undo the consumed '-': it belongs to the immediate
		 expression, which is re-parsed below.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero code in the table means "not valid for this
		 instruction class".  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.	This may be subject to change.	*/
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* As above: hand the '-' back to the expression.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5781
5782 static int
5783 parse_address (char **str, int i)
5784 {
5785 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5786 ? SUCCESS : FAIL;
5787 }
5788
5789 static parse_operand_result
5790 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5791 {
5792 return parse_address_main (str, i, 1, type);
5793 }
5794
5795 /* Parse an operand for a MOVW or MOVT instruction. */
5796 static int
5797 parse_half (char **str)
5798 {
5799 char * p;
5800
5801 p = *str;
5802 skip_past_char (&p, '#');
5803 if (strncasecmp (p, ":lower16:", 9) == 0)
5804 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5805 else if (strncasecmp (p, ":upper16:", 9) == 0)
5806 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5807
5808 if (inst.reloc.type != BFD_RELOC_UNUSED)
5809 {
5810 p += 9;
5811 skip_whitespace (p);
5812 }
5813
5814 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5815 return FAIL;
5816
5817 if (inst.reloc.type == BFD_RELOC_UNUSED)
5818 {
5819 if (inst.reloc.exp.X_op != O_constant)
5820 {
5821 inst.error = _("constant expression expected");
5822 return FAIL;
5823 }
5824 if (inst.reloc.exp.X_add_number < 0
5825 || inst.reloc.exp.X_add_number > 0xffff)
5826 {
5827 inst.error = _("immediate value out of range");
5828 return FAIL;
5829 }
5830 }
5831 *str = p;
5832 return SUCCESS;
5833 }
5834
5835 /* Miscellaneous. */
5836
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write), FALSE
   for MRS (a read); it only affects the default mask bits chosen.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: the operand is a named special register (e.g. PRIMASK),
	 looked up in arm_v7m_psr_hsh.  First span the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR names, a suffix may follow (e.g. "iapsr_nzcvq");
	 cut the lookup key just past the 'r'/'R' of the register name so
	 the suffix is left for the check_suffix code below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step past the 4-character "SPSR"/"CPSR"/"APSR" just matched.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Accumulate one bit per letter; a repeated letter sets 0x20
	     (or 0x2 for 'g'), which is rejected as a bad bitmask below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q together map to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit selects the GE flags, which need the DSP
		 extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates (0x20 / 0x2 markers) and partial nzcvq
	     sets: only "nzcvq" in full, "g", or both are encodable.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR suffix: a named field combination (e.g. "_cxsf"),
	     looked up in arm_psr_hsh.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6033
6034 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6035 value suitable for splatting into the AIF field of the instruction. */
6036
6037 static int
6038 parse_cps_flags (char **str)
6039 {
6040 int val = 0;
6041 int saw_a_flag = 0;
6042 char *s = *str;
6043
6044 for (;;)
6045 switch (*s++)
6046 {
6047 case '\0': case ',':
6048 goto done;
6049
6050 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6051 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6052 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6053
6054 default:
6055 inst.error = _("unrecognized CPS flag");
6056 return FAIL;
6057 }
6058
6059 done:
6060 if (saw_a_flag == 0)
6061 {
6062 inst.error = _("missing CPS flags");
6063 return FAIL;
6064 }
6065
6066 *str = s - 1;
6067 return val;
6068 }
6069
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */

static int
parse_endian_specifier (char **str)
{
  int little_endian;
  char *s = *str;

  /* NOTE(review): strncasecmp returns 0 on a match, so this condition is
     true when S does *not* start with "BE".  As written, any string other
     than "BE" takes the first branch, "BE" itself takes the second, and
     the else branch below can never be reached.  The function therefore
     returns 1 for "BE" and 0 for "LE" -- the opposite of the header
     comment -- and a two-letter junk specifier followed by a delimiter
     slips through as 0.  The caller (presumably do_setend) appears to
     depend on the current values; confirm before changing either the
     comparisons or the comment.  */
  if (strncasecmp (s, "BE", 2))
    little_endian = 0;
  else if (strncasecmp (s, "LE", 2))
    little_endian = 1;
  else
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  /* Reject a specifier that continues with more identifier characters
     (e.g. "BEX"): it cannot be a bare BE/LE token.  */
  if (ISALNUM (s[2]) || s[2] == '_')
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  *str = s + 2;
  return little_endian;
}
6098
6099 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6100 value suitable for poking into the rotate field of an sxt or sxta
6101 instruction, or FAIL on error. */
6102
6103 static int
6104 parse_ror (char **str)
6105 {
6106 int rot;
6107 char *s = *str;
6108
6109 if (strncasecmp (s, "ROR", 3) == 0)
6110 s += 3;
6111 else
6112 {
6113 inst.error = _("missing rotation field after comma");
6114 return FAIL;
6115 }
6116
6117 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6118 return FAIL;
6119
6120 switch (rot)
6121 {
6122 case 0: *str = s; return 0x0;
6123 case 8: *str = s; return 0x1;
6124 case 16: *str = s; return 0x2;
6125 case 24: *str = s; return 0x3;
6126
6127 default:
6128 inst.error = _("rotation can only be 0, 8, 16, or 24");
6129 return FAIL;
6130 }
6131 }
6132
6133 /* Parse a conditional code (from conds[] below). The value returned is in the
6134 range 0 .. 14, or FAIL. */
6135 static int
6136 parse_cond (char **str)
6137 {
6138 char *q;
6139 const struct asm_cond *c;
6140 int n;
6141 /* Condition codes are always 2 characters, so matching up to
6142 3 characters is sufficient. */
6143 char cond[3];
6144
6145 q = *str;
6146 n = 0;
6147 while (ISALPHA (*q) && n < 3)
6148 {
6149 cond[n] = TOLOWER (*q);
6150 q++;
6151 n++;
6152 }
6153
6154 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6155 if (!c)
6156 {
6157 inst.error = _("condition required");
6158 return FAIL;
6159 }
6160
6161 *str = q;
6162 return c->value;
6163 }
6164
6165 /* Record a use of the given feature. */
6166 static void
6167 record_feature_use (const arm_feature_set *feature)
6168 {
6169 if (thumb_mode)
6170 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6171 else
6172 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6173 }
6174
6175 /* If the given feature is currently allowed, mark it as used and return TRUE.
6176 Return FALSE otherwise. */
6177 static bfd_boolean
6178 mark_feature_used (const arm_feature_set *feature)
6179 {
6180 /* Ensure the option is currently allowed. */
6181 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6182 return FALSE;
6183
6184 /* Add the appropriate architecture feature for the barrier option used. */
6185 record_feature_use (feature);
6186
6187 return TRUE;
6188 }
6189
6190 /* Parse an option for a barrier instruction. Returns the encoding for the
6191 option, or FAIL. */
6192 static int
6193 parse_barrier (char **str)
6194 {
6195 char *p, *q;
6196 const struct asm_barrier_opt *o;
6197
6198 p = q = *str;
6199 while (ISALPHA (*q))
6200 q++;
6201
6202 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6203 q - p);
6204 if (!o)
6205 return FAIL;
6206
6207 if (!mark_feature_used (&o->arch))
6208 return FAIL;
6209
6210 *str = q;
6211 return o->value;
6212 }
6213
6214 /* Parse the operands of a table branch instruction. Similar to a memory
6215 operand. */
6216 static int
6217 parse_tb (char **str)
6218 {
6219 char * p = *str;
6220 int reg;
6221
6222 if (skip_past_char (&p, '[') == FAIL)
6223 {
6224 inst.error = _("'[' expected");
6225 return FAIL;
6226 }
6227
6228 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6229 {
6230 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6231 return FAIL;
6232 }
6233 inst.operands[0].reg = reg;
6234
6235 if (skip_past_comma (&p) == FAIL)
6236 {
6237 inst.error = _("',' expected");
6238 return FAIL;
6239 }
6240
6241 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6242 {
6243 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6244 return FAIL;
6245 }
6246 inst.operands[0].imm = reg;
6247
6248 if (skip_past_comma (&p) == SUCCESS)
6249 {
6250 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6251 return FAIL;
6252 if (inst.reloc.exp.X_add_number != 1)
6253 {
6254 inst.error = _("invalid shift");
6255 return FAIL;
6256 }
6257 inst.operands[0].shifted = 1;
6258 }
6259
6260 if (skip_past_char (&p, ']') == FAIL)
6261 {
6262 inst.error = _("']' expected");
6263 return FAIL;
6264 }
6265 *str = p;
6266 return SUCCESS;
6267 }
6268
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I is the shared operand index; it is advanced here as operands are
     consumed and written back through WHICH_OPERAND on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* Dispatch on the type of the first operand: scalar, vector register,
     or ARM core register.  Element size 8 deliberately over-accepts
     scalars; invalid ones are diagnosed later in the encode step.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  /* A quad register cannot be the destination of a core-register
	     transfer.  */
	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D destination (case 5): a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     (parse_big_immediate stores the value itself.)  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  A second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6491
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the low
   16 bits and the Thumb code the high 16 bits; parse_operands selects
   the half appropriate to the current instruction set at run time.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6496
/* Matcher codes for parse_operands.  An instruction's operand pattern is
   a sequence of these codes terminated by OP_stop.  Codes at or above
   OP_FIRST_OPTIONAL describe optional operands, which parse_operands may
   backtrack over.  Mixed ARM/Thumb codes pack two plain codes via
   MIX_ARM_THUMB_OPERANDS above.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* First code that parse_operands treats as optional (may backtrack).  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6631
6632 /* Generic instruction operand parser. This does no encoding and no
6633 semantic validation; it merely squirrels values away in the inst
6634 structure. Returns SUCCESS or FAIL depending on whether the
6635 specified grammar matched. */
6636 static int
6637 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6638 {
6639 unsigned const int *upat = pattern;
6640 char *backtrack_pos = 0;
6641 const char *backtrack_error = 0;
6642 int i, val = 0, backtrack_index = 0;
6643 enum arm_reg_type rtype;
6644 parse_operand_result result;
6645 unsigned int op_parse_code;
6646
6647 #define po_char_or_fail(chr) \
6648 do \
6649 { \
6650 if (skip_past_char (&str, chr) == FAIL) \
6651 goto bad_args; \
6652 } \
6653 while (0)
6654
6655 #define po_reg_or_fail(regtype) \
6656 do \
6657 { \
6658 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6659 & inst.operands[i].vectype); \
6660 if (val == FAIL) \
6661 { \
6662 first_error (_(reg_expected_msgs[regtype])); \
6663 goto failure; \
6664 } \
6665 inst.operands[i].reg = val; \
6666 inst.operands[i].isreg = 1; \
6667 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6668 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6669 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6670 || rtype == REG_TYPE_VFD \
6671 || rtype == REG_TYPE_NQ); \
6672 } \
6673 while (0)
6674
6675 #define po_reg_or_goto(regtype, label) \
6676 do \
6677 { \
6678 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6679 & inst.operands[i].vectype); \
6680 if (val == FAIL) \
6681 goto label; \
6682 \
6683 inst.operands[i].reg = val; \
6684 inst.operands[i].isreg = 1; \
6685 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6686 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6687 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6688 || rtype == REG_TYPE_VFD \
6689 || rtype == REG_TYPE_NQ); \
6690 } \
6691 while (0)
6692
6693 #define po_imm_or_fail(min, max, popt) \
6694 do \
6695 { \
6696 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6697 goto failure; \
6698 inst.operands[i].imm = val; \
6699 } \
6700 while (0)
6701
6702 #define po_scalar_or_goto(elsz, label) \
6703 do \
6704 { \
6705 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6706 if (val == FAIL) \
6707 goto label; \
6708 inst.operands[i].reg = val; \
6709 inst.operands[i].isscalar = 1; \
6710 } \
6711 while (0)
6712
6713 #define po_misc_or_fail(expr) \
6714 do \
6715 { \
6716 if (expr) \
6717 goto failure; \
6718 } \
6719 while (0)
6720
6721 #define po_misc_or_fail_no_backtrack(expr) \
6722 do \
6723 { \
6724 result = expr; \
6725 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6726 backtrack_pos = 0; \
6727 if (result != PARSE_OPERAND_SUCCESS) \
6728 goto failure; \
6729 } \
6730 while (0)
6731
6732 #define po_barrier_or_imm(str) \
6733 do \
6734 { \
6735 val = parse_barrier (&str); \
6736 if (val == FAIL && ! ISALPHA (*str)) \
6737 goto immediate; \
6738 if (val == FAIL \
6739 /* ISB can only take SY as an option. */ \
6740 || ((inst.instruction & 0xf0) == 0x60 \
6741 && val != 0xf)) \
6742 { \
6743 inst.error = _("invalid barrier type"); \
6744 backtrack_pos = 0; \
6745 goto failure; \
6746 } \
6747 } \
6748 while (0)
6749
6750 skip_whitespace (str);
6751
6752 for (i = 0; upat[i] != OP_stop; i++)
6753 {
6754 op_parse_code = upat[i];
6755 if (op_parse_code >= 1<<16)
6756 op_parse_code = thumb ? (op_parse_code >> 16)
6757 : (op_parse_code & ((1<<16)-1));
6758
6759 if (op_parse_code >= OP_FIRST_OPTIONAL)
6760 {
6761 /* Remember where we are in case we need to backtrack. */
6762 gas_assert (!backtrack_pos);
6763 backtrack_pos = str;
6764 backtrack_error = inst.error;
6765 backtrack_index = i;
6766 }
6767
6768 if (i > 0 && (i > 1 || inst.operands[0].present))
6769 po_char_or_fail (',');
6770
6771 switch (op_parse_code)
6772 {
6773 /* Registers */
6774 case OP_oRRnpc:
6775 case OP_oRRnpcsp:
6776 case OP_RRnpc:
6777 case OP_RRnpcsp:
6778 case OP_oRR:
6779 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6780 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6781 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6782 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6783 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6784 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6785 case OP_oRND:
6786 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6787 case OP_RVC:
6788 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6789 break;
6790 /* Also accept generic coprocessor regs for unknown registers. */
6791 coproc_reg:
6792 po_reg_or_fail (REG_TYPE_CN);
6793 break;
6794 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6795 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6796 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6797 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6798 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6799 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6800 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6801 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6802 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6803 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6804 case OP_oRNQ:
6805 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6806 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6807 case OP_oRNDQ:
6808 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6809 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6810 case OP_oRNSDQ:
6811 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6812
6813 /* Neon scalar. Using an element size of 8 means that some invalid
6814 scalars are accepted here, so deal with those in later code. */
6815 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6816
6817 case OP_RNDQ_I0:
6818 {
6819 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6820 break;
6821 try_imm0:
6822 po_imm_or_fail (0, 0, TRUE);
6823 }
6824 break;
6825
6826 case OP_RVSD_I0:
6827 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6828 break;
6829
6830 case OP_RSVD_FI0:
6831 {
6832 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6833 break;
6834 try_ifimm0:
6835 if (parse_ifimm_zero (&str))
6836 inst.operands[i].imm = 0;
6837 else
6838 {
6839 inst.error
6840 = _("only floating point zero is allowed as immediate value");
6841 goto failure;
6842 }
6843 }
6844 break;
6845
6846 case OP_RR_RNSC:
6847 {
6848 po_scalar_or_goto (8, try_rr);
6849 break;
6850 try_rr:
6851 po_reg_or_fail (REG_TYPE_RN);
6852 }
6853 break;
6854
6855 case OP_RNSDQ_RNSC:
6856 {
6857 po_scalar_or_goto (8, try_nsdq);
6858 break;
6859 try_nsdq:
6860 po_reg_or_fail (REG_TYPE_NSDQ);
6861 }
6862 break;
6863
6864 case OP_RNSD_RNSC:
6865 {
6866 po_scalar_or_goto (8, try_s_scalar);
6867 break;
6868 try_s_scalar:
6869 po_scalar_or_goto (4, try_nsd);
6870 break;
6871 try_nsd:
6872 po_reg_or_fail (REG_TYPE_NSD);
6873 }
6874 break;
6875
6876 case OP_RNDQ_RNSC:
6877 {
6878 po_scalar_or_goto (8, try_ndq);
6879 break;
6880 try_ndq:
6881 po_reg_or_fail (REG_TYPE_NDQ);
6882 }
6883 break;
6884
6885 case OP_RND_RNSC:
6886 {
6887 po_scalar_or_goto (8, try_vfd);
6888 break;
6889 try_vfd:
6890 po_reg_or_fail (REG_TYPE_VFD);
6891 }
6892 break;
6893
6894 case OP_VMOV:
6895 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6896 not careful then bad things might happen. */
6897 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6898 break;
6899
6900 case OP_RNDQ_Ibig:
6901 {
6902 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6903 break;
6904 try_immbig:
6905 /* There's a possibility of getting a 64-bit immediate here, so
6906 we need special handling. */
6907 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6908 == FAIL)
6909 {
6910 inst.error = _("immediate value is out of range");
6911 goto failure;
6912 }
6913 }
6914 break;
6915
6916 case OP_RNDQ_I63b:
6917 {
6918 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6919 break;
6920 try_shimm:
6921 po_imm_or_fail (0, 63, TRUE);
6922 }
6923 break;
6924
6925 case OP_RRnpcb:
6926 po_char_or_fail ('[');
6927 po_reg_or_fail (REG_TYPE_RN);
6928 po_char_or_fail (']');
6929 break;
6930
6931 case OP_RRnpctw:
6932 case OP_RRw:
6933 case OP_oRRw:
6934 po_reg_or_fail (REG_TYPE_RN);
6935 if (skip_past_char (&str, '!') == SUCCESS)
6936 inst.operands[i].writeback = 1;
6937 break;
6938
6939 /* Immediates */
6940 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6941 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6942 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6943 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6944 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6945 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6946 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6947 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6948 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6949 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6950 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6951 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6952
6953 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6954 case OP_oI7b:
6955 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6956 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6957 case OP_oI31b:
6958 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6959 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6960 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6961 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6962
6963 /* Immediate variants */
6964 case OP_oI255c:
6965 po_char_or_fail ('{');
6966 po_imm_or_fail (0, 255, TRUE);
6967 po_char_or_fail ('}');
6968 break;
6969
6970 case OP_I31w:
6971 /* The expression parser chokes on a trailing !, so we have
6972 to find it first and zap it. */
6973 {
6974 char *s = str;
6975 while (*s && *s != ',')
6976 s++;
6977 if (s[-1] == '!')
6978 {
6979 s[-1] = '\0';
6980 inst.operands[i].writeback = 1;
6981 }
6982 po_imm_or_fail (0, 31, TRUE);
6983 if (str == s - 1)
6984 str = s;
6985 }
6986 break;
6987
6988 /* Expressions */
6989 case OP_EXPi: EXPi:
6990 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6991 GE_OPT_PREFIX));
6992 break;
6993
6994 case OP_EXP:
6995 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6996 GE_NO_PREFIX));
6997 break;
6998
6999 case OP_EXPr: EXPr:
7000 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
7001 GE_NO_PREFIX));
7002 if (inst.reloc.exp.X_op == O_symbol)
7003 {
7004 val = parse_reloc (&str);
7005 if (val == -1)
7006 {
7007 inst.error = _("unrecognized relocation suffix");
7008 goto failure;
7009 }
7010 else if (val != BFD_RELOC_UNUSED)
7011 {
7012 inst.operands[i].imm = val;
7013 inst.operands[i].hasreloc = 1;
7014 }
7015 }
7016 break;
7017
7018 /* Operand for MOVW or MOVT. */
7019 case OP_HALF:
7020 po_misc_or_fail (parse_half (&str));
7021 break;
7022
7023 /* Register or expression. */
7024 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7025 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7026
7027 /* Register or immediate. */
7028 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7029 I0: po_imm_or_fail (0, 0, FALSE); break;
7030
7031 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7032 IF:
7033 if (!is_immediate_prefix (*str))
7034 goto bad_args;
7035 str++;
7036 val = parse_fpa_immediate (&str);
7037 if (val == FAIL)
7038 goto failure;
7039 /* FPA immediates are encoded as registers 8-15.
7040 parse_fpa_immediate has already applied the offset. */
7041 inst.operands[i].reg = val;
7042 inst.operands[i].isreg = 1;
7043 break;
7044
7045 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7046 I32z: po_imm_or_fail (0, 32, FALSE); break;
7047
7048 /* Two kinds of register. */
7049 case OP_RIWR_RIWC:
7050 {
7051 struct reg_entry *rege = arm_reg_parse_multi (&str);
7052 if (!rege
7053 || (rege->type != REG_TYPE_MMXWR
7054 && rege->type != REG_TYPE_MMXWC
7055 && rege->type != REG_TYPE_MMXWCG))
7056 {
7057 inst.error = _("iWMMXt data or control register expected");
7058 goto failure;
7059 }
7060 inst.operands[i].reg = rege->number;
7061 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7062 }
7063 break;
7064
7065 case OP_RIWC_RIWG:
7066 {
7067 struct reg_entry *rege = arm_reg_parse_multi (&str);
7068 if (!rege
7069 || (rege->type != REG_TYPE_MMXWC
7070 && rege->type != REG_TYPE_MMXWCG))
7071 {
7072 inst.error = _("iWMMXt control register expected");
7073 goto failure;
7074 }
7075 inst.operands[i].reg = rege->number;
7076 inst.operands[i].isreg = 1;
7077 }
7078 break;
7079
7080 /* Misc */
7081 case OP_CPSF: val = parse_cps_flags (&str); break;
7082 case OP_ENDI: val = parse_endian_specifier (&str); break;
7083 case OP_oROR: val = parse_ror (&str); break;
7084 case OP_COND: val = parse_cond (&str); break;
7085 case OP_oBARRIER_I15:
7086 po_barrier_or_imm (str); break;
7087 immediate:
7088 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7089 goto failure;
7090 break;
7091
7092 case OP_wPSR:
7093 case OP_rPSR:
7094 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7095 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7096 {
7097 inst.error = _("Banked registers are not available with this "
7098 "architecture.");
7099 goto failure;
7100 }
7101 break;
7102 try_psr:
7103 val = parse_psr (&str, op_parse_code == OP_wPSR);
7104 break;
7105
7106 case OP_APSR_RR:
7107 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7108 break;
7109 try_apsr:
7110 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7111 instruction). */
7112 if (strncasecmp (str, "APSR_", 5) == 0)
7113 {
7114 unsigned found = 0;
7115 str += 5;
7116 while (found < 15)
7117 switch (*str++)
7118 {
7119 case 'c': found = (found & 1) ? 16 : found | 1; break;
7120 case 'n': found = (found & 2) ? 16 : found | 2; break;
7121 case 'z': found = (found & 4) ? 16 : found | 4; break;
7122 case 'v': found = (found & 8) ? 16 : found | 8; break;
7123 default: found = 16;
7124 }
7125 if (found != 15)
7126 goto failure;
7127 inst.operands[i].isvec = 1;
7128 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7129 inst.operands[i].reg = REG_PC;
7130 }
7131 else
7132 goto failure;
7133 break;
7134
7135 case OP_TB:
7136 po_misc_or_fail (parse_tb (&str));
7137 break;
7138
7139 /* Register lists. */
7140 case OP_REGLST:
7141 val = parse_reg_list (&str);
7142 if (*str == '^')
7143 {
7144 inst.operands[i].writeback = 1;
7145 str++;
7146 }
7147 break;
7148
7149 case OP_VRSLST:
7150 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7151 break;
7152
7153 case OP_VRDLST:
7154 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7155 break;
7156
7157 case OP_VRSDLST:
7158 /* Allow Q registers too. */
7159 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7160 REGLIST_NEON_D);
7161 if (val == FAIL)
7162 {
7163 inst.error = NULL;
7164 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7165 REGLIST_VFP_S);
7166 inst.operands[i].issingle = 1;
7167 }
7168 break;
7169
7170 case OP_NRDLST:
7171 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7172 REGLIST_NEON_D);
7173 break;
7174
7175 case OP_NSTRLST:
7176 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7177 &inst.operands[i].vectype);
7178 break;
7179
7180 /* Addressing modes */
7181 case OP_ADDR:
7182 po_misc_or_fail (parse_address (&str, i));
7183 break;
7184
7185 case OP_ADDRGLDR:
7186 po_misc_or_fail_no_backtrack (
7187 parse_address_group_reloc (&str, i, GROUP_LDR));
7188 break;
7189
7190 case OP_ADDRGLDRS:
7191 po_misc_or_fail_no_backtrack (
7192 parse_address_group_reloc (&str, i, GROUP_LDRS));
7193 break;
7194
7195 case OP_ADDRGLDC:
7196 po_misc_or_fail_no_backtrack (
7197 parse_address_group_reloc (&str, i, GROUP_LDC));
7198 break;
7199
7200 case OP_SH:
7201 po_misc_or_fail (parse_shifter_operand (&str, i));
7202 break;
7203
7204 case OP_SHG:
7205 po_misc_or_fail_no_backtrack (
7206 parse_shifter_operand_group_reloc (&str, i));
7207 break;
7208
7209 case OP_oSHll:
7210 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7211 break;
7212
7213 case OP_oSHar:
7214 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7215 break;
7216
7217 case OP_oSHllar:
7218 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7219 break;
7220
7221 default:
7222 as_fatal (_("unhandled operand code %d"), op_parse_code);
7223 }
7224
7225 /* Various value-based sanity checks and shared operations. We
7226 do not signal immediate failures for the register constraints;
7227 this allows a syntax error to take precedence. */
7228 switch (op_parse_code)
7229 {
7230 case OP_oRRnpc:
7231 case OP_RRnpc:
7232 case OP_RRnpcb:
7233 case OP_RRw:
7234 case OP_oRRw:
7235 case OP_RRnpc_I0:
7236 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7237 inst.error = BAD_PC;
7238 break;
7239
7240 case OP_oRRnpcsp:
7241 case OP_RRnpcsp:
7242 if (inst.operands[i].isreg)
7243 {
7244 if (inst.operands[i].reg == REG_PC)
7245 inst.error = BAD_PC;
7246 else if (inst.operands[i].reg == REG_SP
7247 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7248 relaxed since ARMv8-A. */
7249 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7250 {
7251 gas_assert (thumb);
7252 inst.error = BAD_SP;
7253 }
7254 }
7255 break;
7256
7257 case OP_RRnpctw:
7258 if (inst.operands[i].isreg
7259 && inst.operands[i].reg == REG_PC
7260 && (inst.operands[i].writeback || thumb))
7261 inst.error = BAD_PC;
7262 break;
7263
7264 case OP_CPSF:
7265 case OP_ENDI:
7266 case OP_oROR:
7267 case OP_wPSR:
7268 case OP_rPSR:
7269 case OP_COND:
7270 case OP_oBARRIER_I15:
7271 case OP_REGLST:
7272 case OP_VRSLST:
7273 case OP_VRDLST:
7274 case OP_VRSDLST:
7275 case OP_NRDLST:
7276 case OP_NSTRLST:
7277 if (val == FAIL)
7278 goto failure;
7279 inst.operands[i].imm = val;
7280 break;
7281
7282 default:
7283 break;
7284 }
7285
7286 /* If we get here, this operand was successfully parsed. */
7287 inst.operands[i].present = 1;
7288 continue;
7289
7290 bad_args:
7291 inst.error = BAD_ARGS;
7292
7293 failure:
7294 if (!backtrack_pos)
7295 {
7296 /* The parse routine should already have set inst.error, but set a
7297 default here just in case. */
7298 if (!inst.error)
7299 inst.error = _("syntax error");
7300 return FAIL;
7301 }
7302
7303 /* Do not backtrack over a trailing optional argument that
7304 absorbed some text. We will only fail again, with the
7305 'garbage following instruction' error message, which is
7306 probably less helpful than the current one. */
7307 if (backtrack_index == i && backtrack_pos != str
7308 && upat[i+1] == OP_stop)
7309 {
7310 if (!inst.error)
7311 inst.error = _("syntax error");
7312 return FAIL;
7313 }
7314
7315 /* Try again, skipping the optional argument at backtrack_pos. */
7316 str = backtrack_pos;
7317 inst.error = backtrack_error;
7318 inst.operands[backtrack_index].present = 0;
7319 i = backtrack_index;
7320 backtrack_pos = 0;
7321 }
7322
7323 /* Check that we have parsed all the arguments. */
7324 if (*str != '\0' && !inst.error)
7325 inst.error = _("garbage following instruction");
7326
7327 return inst.error ? FAIL : SUCCESS;
7328 }
7329
7330 #undef po_char_or_fail
7331 #undef po_reg_or_fail
7332 #undef po_reg_or_goto
7333 #undef po_imm_or_fail
7334 #undef po_scalar_or_fail
7335 #undef po_barrier_or_imm
7336
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and 'return' from the
   calling function.  Because of the embedded 'return', this macro can
   only be used inside functions returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7348
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like 'constraint' above, this macro contains an embedded 'return'
   and so may only be used inside functions returning void.  REG is
   evaluated more than once.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
7369
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only emitted when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7377
7378 /* Functions for operand encoding. ARM, then Thumb. */
7379
7380 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7381
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution of these instructions is UNPREDICTABLE;
     warn, but still assemble what the user wrote.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Hard error if the selected CPU/FPU lacks the FP16 extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Rewrite the coprocessor field (bits [11:8]) to 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7401
7402 /* If VAL can be encoded in the immediate field of an ARM instruction,
7403 return the encoded form. Otherwise, return FAIL. */
7404
7405 static unsigned int
7406 encode_arm_immediate (unsigned int val)
7407 {
7408 unsigned int a, i;
7409
7410 if (val <= 0xff)
7411 return val;
7412
7413 for (i = 2; i < 32; i += 2)
7414 if ((a = rotate_left (val, i)) <= 0xff)
7415 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7416
7417 return FAIL;
7418 }
7419
7420 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7421 return the encoded form. Otherwise, return FAIL. */
7422 static unsigned int
7423 encode_thumb32_immediate (unsigned int val)
7424 {
7425 unsigned int a, i;
7426
7427 if (val <= 0xff)
7428 return val;
7429
7430 for (i = 1; i <= 24; i++)
7431 {
7432 a = val >> i;
7433 if ((val & ~(0xff << i)) == 0)
7434 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7435 }
7436
7437 a = val & 0xff;
7438 if (val == ((a << 16) | a))
7439 return 0x100 | a;
7440 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7441 return 0x300 | a;
7442
7443 a = val & 0xff00;
7444 if (val == ((a << 16) | a))
7445 return 0x200 | (a >> 8);
7446
7447 return FAIL;
7448 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS selects which instruction field (Sd/Sn/Sm or Dd/Dn/Dm) the
   register REG is written to.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 require the 32-D-register VFP extension; either record
     that the feature was used, or report an error.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split as 4-bit-field:low-bit; D registers split as
     high-bit:4-bit-field.  The split bit lands in a position that
     depends on the operand slot.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7503
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  Operand I carries the shift
   kind, and either a shift-amount register (immisreg) or an
   immediate amount resolved later through a reloc.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      /* Any use of r15 in a register-shifted-register operand is
	 UNPREDICTABLE; warn for each register operand involved.  */
      int op_index;
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* The shift-amount register itself must not be r15 either.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded as ROR with an (implicit) zero shift amount.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  /* Shift amount comes from a register, placed in bits [11:8].  */
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: let md_apply_fix insert it.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
7541
/* Encode operand I -- either a (possibly shifted) register or an
   immediate -- as an ARM data-processing shifter operand.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      /* Immediate form: set the I bit.  If the value will be encoded
	 by md_apply_fix (BFD_RELOC_ARM_IMMEDIATE) leave the field
	 empty; otherwise .imm presumably already holds the encoded
	 12-bit value -- set by the caller; verify there.  */
      inst.instruction |= INST_IMMEDIATE;
      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
7557
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits shared by both addressing modes.  IS_T is true when encoding
   an unprivileged (T-variant) load/store.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* T variants are post-indexed and reuse the W bit to select the
	 unprivileged access form.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits [15:12]) equals a base
     register that will be written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7600
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally shifted.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is ROR with an implicit zero amount; other shift kinds
	     get their amount inserted later via the SHIFT_IMM reloc.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7660
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is not
     representable.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7704
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  /* e f g h -> bits [3:0].  */
  inst.instruction |= immbits & 0xf;
  /* b c d -> bits [18:16].  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* a -> bit 28 in Thumb encodings, bit 24 in ARM encodings.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
7719
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      /* 64-bit inversion flips the high word too...  */
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      /* ...and, like 32-bit, the whole low word.  */
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7756
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;
      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7768
/* For immediate of above form, return 0bABCD: collect bit 0 of each
   byte of IMM into a 4-bit value, byte 0 lowest.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1u) << byte;

  return squashed;
}
7777
/* Compress quarter-float representation to 0b...000 abcdefgh: take the
   sign bit plus the 7 bits below bit 25 of a single-precision pattern.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_bits;
}
7785
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL if the value has no
   representable encoding.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* cmode 0xf: quarter-precision float immediate (32-bit, MOV only).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xe with op forced to 1: every byte is 0x00 or 0xff,
	 encoded one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit value is only encodable when both halves
	 are equal; fall through to the 32-bit forms on IMMLO.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmode 0x0/0x2/0x4/0x6: a single byte in any byte position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* cmode 0xc/0xd: a byte shifted up with ones filled in below.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit element if the word is a repeated halfword.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmode 0x8/0xa: a byte in either half of a 16-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit element if the halfword is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  /* cmode 0xe with op 0: plain 8-bit immediate.  */
  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7895
7896 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE 754 binary64 layout: bit 63 sign, bits [62:52] exponent
     (bias 1023), bits [51:0] mantissa.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* The exponent must be zero (zero/denormal), all-ones (inf/NaN), or
     within single precision's representable range (bias 127); and the
     low 29 mantissa bits (52 - 23) must be zero, so that truncating to
     a 23-bit mantissa drops nothing.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	 && (mantissa & 0x1FFFFFFFl) == 0;
}
7910
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (bfd_int64_t v)
{
  /* Unpack binary64: sign, 11-bit exponent (bias 1023), 52-bit
     mantissa.  */
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Inf/NaN: map to the all-ones single-precision exponent.  */
    exp = 0xFF;
  else
    {
      /* Rebias from 1023 (double) to 127 (single).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.
	     NOTE(review): 0x7F in the exponent field is a large finite
	     value, not an IEEE infinity (which needs 0xFF).  Callers
	     gate inputs through is_double_a_single, which appears to
	     make this branch unreachable -- confirm before relying on
	     it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 mantissa bits and assemble the binary32 word.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7942 #endif /* BFD_HOST_64_BIT */
7943
/* Kind of "=N" literal-load being assembled; consumed by
   move_or_literal_pool below.  */
enum lit_type
{
  CONST_THUMB,	/* Load into a core register, Thumb encoding.  */
  CONST_ARM,	/* Load into a core register, ARM encoding.  */
  CONST_VEC	/* Load into a VFP/Neon register -- see move_or_literal_pool.  */
};
7950
7951 static void do_vfp_nsyn_opcode (const char *);
7952
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" is only valid on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Fold the bignum's littlenums into a single host integer.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise complement for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* NOTE(review): when the expression is signed and no high
		 word was parsed, the high half is the sign-extension of
		 the low half.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted value (VMVN form).  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move was possible: fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8197
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS or FAIL; on FAIL inst.error is set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=constant" form: may turn into a vmov.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: encode the option immediate and set INDEX_UP.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 set by
     the parser are preserved; otherwise pick the default CP offset
     relocation for the current instruction set.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8274
8275 /* Functions for instruction encoding, sorted by sub-architecture.
8276 First some generics; their names are taken from the conventional
8277 bit positions for register arguments in ARM format instructions. */
8278
/* Encoder for instructions taking no operands; the opcode from the
   insns[] table is already complete.  */
static void
do_noargs (void)
{
}
8283
8284 static void
8285 do_rd (void)
8286 {
8287 inst.instruction |= inst.operands[0].reg << 12;
8288 }
8289
8290 static void
8291 do_rn (void)
8292 {
8293 inst.instruction |= inst.operands[0].reg << 16;
8294 }
8295
8296 static void
8297 do_rd_rm (void)
8298 {
8299 inst.instruction |= inst.operands[0].reg << 12;
8300 inst.instruction |= inst.operands[1].reg;
8301 }
8302
8303 static void
8304 do_rm_rn (void)
8305 {
8306 inst.instruction |= inst.operands[0].reg;
8307 inst.instruction |= inst.operands[1].reg << 16;
8308 }
8309
8310 static void
8311 do_rd_rn (void)
8312 {
8313 inst.instruction |= inst.operands[0].reg << 12;
8314 inst.instruction |= inst.operands[1].reg << 16;
8315 }
8316
8317 static void
8318 do_rn_rd (void)
8319 {
8320 inst.instruction |= inst.operands[0].reg << 16;
8321 inst.instruction |= inst.operands[1].reg << 12;
8322 }
8323
8324 static void
8325 do_tt (void)
8326 {
8327 inst.instruction |= inst.operands[0].reg << 8;
8328 inst.instruction |= inst.operands[1].reg << 16;
8329 }
8330
8331 static bfd_boolean
8332 check_obsolete (const arm_feature_set *feature, const char *msg)
8333 {
8334 if (ARM_CPU_IS_ANY (cpu_variant))
8335 {
8336 as_tsktsk ("%s", msg);
8337 return TRUE;
8338 }
8339 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8340 {
8341 as_bad ("%s", msg);
8342 return TRUE;
8343 }
8344
8345 return FALSE;
8346 }
8347
8348 static void
8349 do_rd_rm_rn (void)
8350 {
8351 unsigned Rn = inst.operands[2].reg;
8352 /* Enforce restrictions on SWP instruction. */
8353 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8354 {
8355 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8356 _("Rn must not overlap other operands"));
8357
8358 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8359 */
8360 if (!check_obsolete (&arm_ext_v8,
8361 _("swp{b} use is obsoleted for ARMv8 and later"))
8362 && warn_on_deprecated
8363 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8364 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8365 }
8366
8367 inst.instruction |= inst.operands[0].reg << 12;
8368 inst.instruction |= inst.operands[1].reg;
8369 inst.instruction |= Rn << 16;
8370 }
8371
8372 static void
8373 do_rd_rn_rm (void)
8374 {
8375 inst.instruction |= inst.operands[0].reg << 12;
8376 inst.instruction |= inst.operands[1].reg << 16;
8377 inst.instruction |= inst.operands[2].reg;
8378 }
8379
8380 static void
8381 do_rm_rd_rn (void)
8382 {
8383 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8384 constraint (((inst.reloc.exp.X_op != O_constant
8385 && inst.reloc.exp.X_op != O_illegal)
8386 || inst.reloc.exp.X_add_number != 0),
8387 BAD_ADDR_MODE);
8388 inst.instruction |= inst.operands[0].reg;
8389 inst.instruction |= inst.operands[1].reg << 12;
8390 inst.instruction |= inst.operands[2].reg << 16;
8391 }
8392
8393 static void
8394 do_imm0 (void)
8395 {
8396 inst.instruction |= inst.operands[0].imm;
8397 }
8398
8399 static void
8400 do_rd_cpaddr (void)
8401 {
8402 inst.instruction |= inst.operands[0].reg << 12;
8403 encode_arm_cp_address (1, TRUE, TRUE, 0);
8404 }
8405
8406 /* ARM instructions, in alphabetical order by function name (except
8407 that wrapper functions appear immediately after the function they
8408 wrap). */
8409
8410 /* This is a pseudo-op of the form "adr rd, label" to be converted
8411 into a relative address of the form "add rd, pc, #label-.-8". */
8412
8413 static void
8414 do_adr (void)
8415 {
8416 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8417
8418 /* Frag hacking will turn this into a sub instruction if the offset turns
8419 out to be negative. */
8420 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8421 inst.reloc.pc_rel = 1;
8422 inst.reloc.exp.X_add_number -= 8;
8423
8424 if (support_interwork
8425 && inst.reloc.exp.X_op == O_symbol
8426 && inst.reloc.exp.X_add_symbol != NULL
8427 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8428 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8429 inst.reloc.exp.X_add_number |= 1;
8430 }
8431
8432 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8433 into a relative address of the form:
8434 add rd, pc, #low(label-.-8)"
8435 add rd, rd, #high(label-.-8)" */
8436
8437 static void
8438 do_adrl (void)
8439 {
8440 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8441
8442 /* Frag hacking will turn this into a sub instruction if the offset turns
8443 out to be negative. */
8444 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8445 inst.reloc.pc_rel = 1;
8446 inst.size = INSN_SIZE * 2;
8447 inst.reloc.exp.X_add_number -= 8;
8448
8449 if (support_interwork
8450 && inst.reloc.exp.X_op == O_symbol
8451 && inst.reloc.exp.X_add_symbol != NULL
8452 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8453 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8454 inst.reloc.exp.X_add_number |= 1;
8455 }
8456
8457 static void
8458 do_arit (void)
8459 {
8460 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8461 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8462 THUMB1_RELOC_ONLY);
8463 if (!inst.operands[1].present)
8464 inst.operands[1].reg = inst.operands[0].reg;
8465 inst.instruction |= inst.operands[0].reg << 12;
8466 inst.instruction |= inst.operands[1].reg << 16;
8467 encode_arm_shifter_operand (2);
8468 }
8469
8470 static void
8471 do_barrier (void)
8472 {
8473 if (inst.operands[0].present)
8474 inst.instruction |= inst.operands[0].imm;
8475 else
8476 inst.instruction |= 0xf;
8477 }
8478
8479 static void
8480 do_bfc (void)
8481 {
8482 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8483 constraint (msb > 32, _("bit-field extends past end of register"));
8484 /* The instruction encoding stores the LSB and MSB,
8485 not the LSB and width. */
8486 inst.instruction |= inst.operands[0].reg << 12;
8487 inst.instruction |= inst.operands[1].imm << 7;
8488 inst.instruction |= (msb - 1) << 16;
8489 }
8490
8491 static void
8492 do_bfi (void)
8493 {
8494 unsigned int msb;
8495
8496 /* #0 in second position is alternative syntax for bfc, which is
8497 the same instruction but with REG_PC in the Rm field. */
8498 if (!inst.operands[1].isreg)
8499 inst.operands[1].reg = REG_PC;
8500
8501 msb = inst.operands[2].imm + inst.operands[3].imm;
8502 constraint (msb > 32, _("bit-field extends past end of register"));
8503 /* The instruction encoding stores the LSB and MSB,
8504 not the LSB and width. */
8505 inst.instruction |= inst.operands[0].reg << 12;
8506 inst.instruction |= inst.operands[1].reg;
8507 inst.instruction |= inst.operands[2].imm << 7;
8508 inst.instruction |= (msb - 1) << 16;
8509 }
8510
8511 static void
8512 do_bfx (void)
8513 {
8514 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8515 _("bit-field extends past end of register"));
8516 inst.instruction |= inst.operands[0].reg << 12;
8517 inst.instruction |= inst.operands[1].reg;
8518 inst.instruction |= inst.operands[2].imm << 7;
8519 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8520 }
8521
8522 /* ARM V5 breakpoint instruction (argument parse)
8523 BKPT <16 bit unsigned immediate>
8524 Instruction is not conditional.
8525 The bit pattern given in insns[] has the COND_ALWAYS condition,
8526 and it is an error if the caller tried to override that. */
8527
8528 static void
8529 do_bkpt (void)
8530 {
8531 /* Top 12 of 16 bits to bits 19:8. */
8532 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8533
8534 /* Bottom 4 of 16 bits to bits 3:0. */
8535 inst.instruction |= inst.operands[0].imm & 0xf;
8536 }
8537
8538 static void
8539 encode_branch (int default_reloc)
8540 {
8541 if (inst.operands[0].hasreloc)
8542 {
8543 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8544 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8545 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8546 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8547 ? BFD_RELOC_ARM_PLT32
8548 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8549 }
8550 else
8551 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8552 inst.reloc.pc_rel = 1;
8553 }
8554
/* B (branch): pick the default relocation according to the EABI
   version of the output object.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8565
/* BL (branch with link): for EABI v4 and later, unconditional calls
   use the CALL relocation and conditional ones the JUMP relocation;
   older objects always use the plain branch relocation.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8581
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8613
/* BX (branch and exchange).  May emit an R_ARM_V4BX relocation so the
   linker can rewrite the instruction for pre-v5 objects.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8638
8639
8640 /* ARM v5TEJ. Jump to Jazelle code. */
8641
8642 static void
8643 do_bxj (void)
8644 {
8645 if (inst.operands[0].reg == REG_PC)
8646 as_tsktsk (_("use of r15 in bxj is not really useful"));
8647
8648 inst.instruction |= inst.operands[0].reg;
8649 }
8650
8651 /* Co-processor data operation:
8652 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8653 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8654 static void
8655 do_cdp (void)
8656 {
8657 inst.instruction |= inst.operands[0].reg << 8;
8658 inst.instruction |= inst.operands[1].imm << 20;
8659 inst.instruction |= inst.operands[2].reg << 12;
8660 inst.instruction |= inst.operands[3].reg << 16;
8661 inst.instruction |= inst.operands[4].reg;
8662 inst.instruction |= inst.operands[5].imm << 5;
8663 }
8664
8665 static void
8666 do_cmp (void)
8667 {
8668 inst.instruction |= inst.operands[0].reg << 16;
8669 encode_arm_shifter_operand (1);
8670 }
8671
8672 /* Transfer between coprocessor and ARM registers.
8673 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8674 MRC2
8675 MCR{cond}
8676 MCR2
8677
8678 No special properties. */
8679
/* Description of a coprocessor register access that is deprecated or
   obsoleted on some architectures; matched against MRC/MCR operands
   in do_co_reg.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode2 field.  */
  arm_feature_set deprecated;	/* Features for which access is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which access is obsoleted.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsoletion diagnostic.  */
};
8692
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  All of these became
   deprecated with ARMv8; none is currently marked obsolete.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8720
/* MRC/MRC2/MCR/MCR2: check register restrictions, warn about
   deprecated coprocessor register accesses, then encode
   coproc<8-11>, opc1<21-23>, Rd<12-15>, CRn<16-19>, CRm<0-3>,
   opc2<5-7>.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecated table
     (unless assembling for -mcpu=all).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8770
8771 /* Transfer between coprocessor register and pair of ARM registers.
8772 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8773 MCRR2
8774 MRRC{cond}
8775 MRRC2
8776
8777 Two XScale instructions are special cases of these:
8778
8779 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8780 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8781
8782 Result unpredictable if Rd or Rn is R15. */
8783
8784 static void
8785 do_co_reg2c (void)
8786 {
8787 unsigned Rd, Rn;
8788
8789 Rd = inst.operands[2].reg;
8790 Rn = inst.operands[3].reg;
8791
8792 if (thumb_mode)
8793 {
8794 reject_bad_reg (Rd);
8795 reject_bad_reg (Rn);
8796 }
8797 else
8798 {
8799 constraint (Rd == REG_PC, BAD_PC);
8800 constraint (Rn == REG_PC, BAD_PC);
8801 }
8802
8803 /* Only check the MRRC{2} variants. */
8804 if ((inst.instruction & 0x0FF00000) == 0x0C500000)
8805 {
8806 /* If Rd == Rn, error that the operation is
8807 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8808 constraint (Rd == Rn, BAD_OVERLAP);
8809 }
8810
8811 inst.instruction |= inst.operands[0].reg << 8;
8812 inst.instruction |= inst.operands[1].imm << 4;
8813 inst.instruction |= Rd << 12;
8814 inst.instruction |= Rn << 16;
8815 inst.instruction |= inst.operands[4].reg;
8816 }
8817
8818 static void
8819 do_cpsi (void)
8820 {
8821 inst.instruction |= inst.operands[0].imm << 6;
8822 if (inst.operands[1].present)
8823 {
8824 inst.instruction |= CPSI_MMOD;
8825 inst.instruction |= inst.operands[1].imm;
8826 }
8827 }
8828
8829 static void
8830 do_dbg (void)
8831 {
8832 inst.instruction |= inst.operands[0].imm;
8833 }
8834
8835 static void
8836 do_div (void)
8837 {
8838 unsigned Rd, Rn, Rm;
8839
8840 Rd = inst.operands[0].reg;
8841 Rn = (inst.operands[1].present
8842 ? inst.operands[1].reg : Rd);
8843 Rm = inst.operands[2].reg;
8844
8845 constraint ((Rd == REG_PC), BAD_PC);
8846 constraint ((Rn == REG_PC), BAD_PC);
8847 constraint ((Rm == REG_PC), BAD_PC);
8848
8849 inst.instruction |= Rd << 16;
8850 inst.instruction |= Rn << 0;
8851 inst.instruction |= Rm << 8;
8852 }
8853
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes for this pseudo-instruction.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* The mask is the low nibble of the opcode with bit 4 forced on;
	 cc is the base condition from the operand.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8870
/* If there is only one register in the register list RANGE (a 16-bit
   register mask), then return its register number.  Otherwise return -1.

   Fix: for an empty list ffs() returns 0, so the old code computed
   1 << -1, which is undefined behavior; reject i < 0 before shifting.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
8879
/* Encode an LDM/STM (or PUSH/POP, when FROM_PUSH_POP_MNEM): base
   register, register list, writeback-related diagnostics, and the
   single-register A2 encoding for PUSH/POP.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     return forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8935
/* LDM/STM proper; PUSH/POP reach encode_ldmstm with the flag set by
   a different wrapper.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8941
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8983
/* LDREX: Rt<12-15>, [Rn<16-19>].  Only a plain [Rn] or [Rn, #0]
   address form is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The zero offset is implicit; no relocation is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9015
9016 static void
9017 do_ldrexd (void)
9018 {
9019 constraint (inst.operands[0].reg % 2 != 0,
9020 _("even register required"));
9021 constraint (inst.operands[1].present
9022 && inst.operands[1].reg != inst.operands[0].reg + 1,
9023 _("can only load two consecutive registers"));
9024 /* If op 1 were present and equal to PC, this function wouldn't
9025 have been called in the first place. */
9026 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
9027
9028 inst.instruction |= inst.operands[0].reg << 12;
9029 inst.instruction |= inst.operands[2].reg << 16;
9030 }
9031
9032 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9033 which is not a multiple of four is UNPREDICTABLE. */
9034 static void
9035 check_ldr_r15_aligned (void)
9036 {
9037 constraint (!(inst.operands[1].immisreg)
9038 && (inst.operands[0].reg == REG_PC
9039 && inst.operands[1].reg == REG_PC
9040 && (inst.reloc.exp.X_add_number & 0x3)),
9041 _("ldr to register 15 must be 4-byte aligned"));
9042 }
9043
9044 static void
9045 do_ldst (void)
9046 {
9047 inst.instruction |= inst.operands[0].reg << 12;
9048 if (!inst.operands[1].isreg)
9049 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9050 return;
9051 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9052 check_ldr_r15_aligned ();
9053 }
9054
/* ARM LDRT/STRT (unprivileged load/store, addressing mode 2).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero pre-index offset can be silently converted.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9073
/* Halfword and signed-byte load/store operations.  */

/* ARMv4 LDRH/STRH/LDRSB/LDRSH, addressing mode 3.  PC is not a valid
   transfer register for these encodings.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9086
/* ARMv6T2 LDRHT/STRHT etc. (unprivileged, addressing mode 3).  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero pre-index offset can be silently converted.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9105
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>
   Operand 0 is the coprocessor number (bits 8-11), operand 1 the
   coprocessor register CRd (bits 12-15), operand 2 the address.  */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9115
/* MLA/MLS{cond}{s} Rd, Rm, Rs, Rn.  Rd occupies bits 16-19 and Rn
   bits 12-15 in this multiply-class encoding.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))  /* Bit 22 distinguishes MLS.  */
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9130
/* MOV{cond}{s} Rd, <shifter_operand>.  The Thumb-1 :upper8_15:-style
   relocation group is rejected for the ARM encoding.  */
static void
do_mov (void)
{
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9140
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 set means MOVT (top half); clear means MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation pending: encode the constant directly.  */
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9162
/* Handle VMRS spelled with the pre-UAL VFP syntax.  Returns SUCCESS if
   the instruction was re-dispatched as fmstat/fmrx, FAIL otherwise.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (reg 1) is a valid source,
	 and the whole thing becomes "fmstat".  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9181
/* Handle VMSR spelled with the pre-UAL VFP syntax.  Returns SUCCESS if
   re-dispatched as fmxr, FAIL otherwise.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
9192
/* VMRS Rt, <spec_reg>: move from a VFP system register to a core
   register.  Operand 1 is the system register number.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid destination in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9221
/* VMSR <spec_reg>, Rt: move from a core register to a VFP system
   register.  Operand 0 is the system register number.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);   /* Rejects SP and PC in Thumb state.  */
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9245
/* MRS Rd, {C|S|AP}SR or MRS Rd, <banked_reg>.  May instead be the VFP
   pre-UAL spelling, handled by do_vfp_nsyn_mrs.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser supplied a pre-packed field.
	 NOTE(review): the accepted patterns appear to be bit 9 set or
	 the 0xf0000 nibble all-ones — confirm against the parser.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9274
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* May be the VFP pre-UAL spelling (fmxr).  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* operand 0 is the pre-packed PSR field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the constant to the fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9295
/* MUL{cond}{s} Rd, Rm {, Rs}.  With two operands, Rs defaults to Rd.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only a problem before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9311
9312 /* Long Multiply Parser
9313 UMULL RdLo, RdHi, Rm, Rs
9314 SMULL RdLo, RdHi, Rm, Rs
9315 UMLAL RdLo, RdHi, Rm, Rs
9316 SMLAL RdLo, RdHi, Rm, Rs. */
9317
9318 static void
9319 do_mull (void)
9320 {
9321 inst.instruction |= inst.operands[0].reg << 12;
9322 inst.instruction |= inst.operands[1].reg << 16;
9323 inst.instruction |= inst.operands[2].reg;
9324 inst.instruction |= inst.operands[3].reg << 8;
9325
9326 /* rdhi and rdlo must be different. */
9327 if (inst.operands[0].reg == inst.operands[1].reg)
9328 as_tsktsk (_("rdhi and rdlo must be different"));
9329
9330 /* rdhi, rdlo and rm must all be different before armv6. */
9331 if ((inst.operands[0].reg == inst.operands[2].reg
9332 || inst.operands[1].reg == inst.operands[2].reg)
9333 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9334 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9335 }
9336
/* NOP {#imm}.  With an operand, or on v6K and later, encode the
   architectural hint form; otherwise leave the legacy encoding alone.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;   /* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9350
9351 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9352 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9353 Condition defaults to COND_ALWAYS.
9354 Error if Rd, Rn or Rm are R15. */
9355
9356 static void
9357 do_pkhbt (void)
9358 {
9359 inst.instruction |= inst.operands[0].reg << 12;
9360 inst.instruction |= inst.operands[1].reg << 16;
9361 inst.instruction |= inst.operands[2].reg;
9362 if (inst.operands[3].present)
9363 encode_arm_shift (3);
9364 }
9365
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap positions.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Shift present: keep the PKHTB encoding, normal operand order.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9388
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

    PLD(W) <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* PLD takes only an address operand; reject any other addressing.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9409
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding differs from PLD in having the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9425
/* PUSH/POP {reglist}: rewrite as LDM/STM with SP! as the base and
   hand off to the common LDM/STM encoder.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise an SP-with-
     writeback base register as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9438
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  /* Base register in bits 16-19; W bit if "!" was given.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9452
9453 /* ARM V6 ssat (argument parse). */
9454
9455 static void
9456 do_ssat (void)
9457 {
9458 inst.instruction |= inst.operands[0].reg << 12;
9459 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9460 inst.instruction |= inst.operands[2].reg;
9461
9462 if (inst.operands[3].present)
9463 encode_arm_shift (3);
9464 }
9465
9466 /* ARM V6 usat (argument parse). */
9467
9468 static void
9469 do_usat (void)
9470 {
9471 inst.instruction |= inst.operands[0].reg << 12;
9472 inst.instruction |= inst.operands[1].imm << 16;
9473 inst.instruction |= inst.operands[2].reg;
9474
9475 if (inst.operands[3].present)
9476 encode_arm_shift (3);
9477 }
9478
9479 /* ARM V6 ssat16 (argument parse). */
9480
9481 static void
9482 do_ssat16 (void)
9483 {
9484 inst.instruction |= inst.operands[0].reg << 12;
9485 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9486 inst.instruction |= inst.operands[2].reg;
9487 }
9488
9489 static void
9490 do_usat16 (void)
9491 {
9492 inst.instruction |= inst.operands[0].reg << 12;
9493 inst.instruction |= inst.operands[1].imm << 16;
9494 inst.instruction |= inst.operands[2].reg;
9495 }
9496
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* operand 0 imm is non-zero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9513
/* Shift pseudo-ops (LSL/LSR/ASR/ROR): Rd, {Rm,} Rs|#imm.  With two
   operands, Rm defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by the fixup machinery.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9534
/* SMC #imm: the immediate is encoded via its own relocation type.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9541
/* HVC #imm: the immediate is encoded via its own relocation type.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9548
/* SWI/SVC #imm: the immediate is encoded via its own relocation type.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9555
/* ARMv8.1 SETPAN #imm (ARM encoding): the 1-bit immediate goes in
   bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9564
/* ARMv8.1 SETPAN #imm (Thumb encoding): the immediate goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9573
9574 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9575 SMLAxy{cond} Rd,Rm,Rs,Rn
9576 SMLAWy{cond} Rd,Rm,Rs,Rn
9577 Error if any register is R15. */
9578
9579 static void
9580 do_smla (void)
9581 {
9582 inst.instruction |= inst.operands[0].reg << 16;
9583 inst.instruction |= inst.operands[1].reg;
9584 inst.instruction |= inst.operands[2].reg << 8;
9585 inst.instruction |= inst.operands[3].reg << 12;
9586 }
9587
9588 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9589 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9590 Error if any register is R15.
9591 Warning if Rdlo == Rdhi. */
9592
9593 static void
9594 do_smlal (void)
9595 {
9596 inst.instruction |= inst.operands[0].reg << 12;
9597 inst.instruction |= inst.operands[1].reg << 16;
9598 inst.instruction |= inst.operands[2].reg;
9599 inst.instruction |= inst.operands[3].reg << 8;
9600
9601 if (inst.operands[0].reg == inst.operands[1].reg)
9602 as_tsktsk (_("rdhi and rdlo must be different"));
9603 }
9604
9605 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9606 SMULxy{cond} Rd,Rm,Rs
9607 Error if any register is R15. */
9608
9609 static void
9610 do_smul (void)
9611 {
9612 inst.instruction |= inst.operands[0].reg << 16;
9613 inst.instruction |= inst.operands[1].reg;
9614 inst.instruction |= inst.operands[2].reg << 8;
9615 }
9616
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; if given it
     must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;   /* Target mode number.  */
  /* "!" may be attached to either the base register or the mode.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9638
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* The address must be a plain [Rn] with no offset, shift, writeback
     or post-indexing, and Rn must not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9664
/* Thumb STREXB/STREXH: validate the plain-[Rn] address and register
   overlap, then use the common rm/rd/rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* Status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9679
/* ARM V6K STREXD: store-exclusive doubleword from an even/odd register
   pair.  Operand 0 is the status register Rd, operands 1/2 the value
   pair Rt/Rt2, operand 3 the base register.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap Rt, Rt2 or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9701
/* ARM V8 STRL.  */
/* ARM-state STLEX family: status register must not overlap the value
   or base registers.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9711
/* Thumb-state STLEX family: same overlap check, different field order
   in the encoding (rm/rd/rn).  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9720
9721 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9722 extends it to 32-bits, and adds the result to a value in another
9723 register. You can specify a rotation by 0, 8, 16, or 24 bits
9724 before extracting the 16-bit value.
9725 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9726 Condition defaults to COND_ALWAYS.
9727 Error if any register uses R15. */
9728
9729 static void
9730 do_sxtah (void)
9731 {
9732 inst.instruction |= inst.operands[0].reg << 12;
9733 inst.instruction |= inst.operands[1].reg << 16;
9734 inst.instruction |= inst.operands[2].reg;
9735 inst.instruction |= inst.operands[3].imm << 10;
9736 }
9737
9738 /* ARM V6 SXTH.
9739
9740 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9741 Condition defaults to COND_ALWAYS.
9742 Error if any register uses R15. */
9743
9744 static void
9745 do_sxth (void)
9746 {
9747 inst.instruction |= inst.operands[0].reg << 12;
9748 inst.instruction |= inst.operands[1].reg;
9749 inst.instruction |= inst.operands[2].imm << 10;
9750 }
9751 \f
9752 /* VFP instructions. In a logical order: SP variant first, monad
9753 before dyad, arithmetic then move then load/store. */
9754
/* Single-precision one-operand VFP op: encode Sd and Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9761
/* Single-precision two-operand VFP op: encode Sd, Sn and Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9769
/* Single-precision compare-with-zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9775
/* Single-to-double conversion: double destination, single source.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9782
/* Double-to-single conversion: single destination, double source.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9789
/* FMRS-style move: core register Rt from single-precision Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9796
/* FMRRS-style move: two core registers from two consecutive
   single-precision registers (operand 2's imm is the SP count).  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9806
/* FMSR-style move: single-precision Sn from core register Rt.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9813
/* FMSRR-style move: two consecutive single-precision registers from
   two core registers (operand 0's imm is the SP count).  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9823
/* Single-precision VFP load/store (FLDS/FSTS): Sd plus a coprocessor
   address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9830
/* Double-precision VFP load/store (FLDD/FSTD): Dd plus a coprocessor
   address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9837
9838
/* Common encoder for single-precision load/store multiple.  The
   register count (operand 1's imm) goes directly in the offset
   field.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9851
/* Common encoder for double-precision load/store multiple.  The
   offset field counts words, i.e. twice the register count, plus one
   for the FLDMX/FSTMX (extended) forms.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA forms may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9872
/* FLDMIAS/FSTMIAS: single-precision, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9878
/* FLDMDBS/FSTMDBS: single-precision, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9884
/* FLDMIAD/FSTMIAD: double-precision, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9890
/* FLDMDBD/FSTMDBD: double-precision, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9896
/* FLDMIAX/FSTMIAX: extended (unknown-precision) form, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9902
/* FLDMDBX/FSTMDBX: extended (unknown-precision) form, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9908
/* Double-precision op with Dd and Dm fields.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9915
/* Double-precision op where operand 0 fills Dn and operand 1 Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9922
/* Double-precision op with Dd and Dn fields.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9929
/* Double-precision three-operand op: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9937
/* Double-precision op with only a Dd field.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9943
/* Double-precision op where operands 0/1/2 fill Dm/Dd/Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9951
/* VFPv3 instructions.  */
/* VMOV.F32 Sd, #imm: the 8-bit encoded constant is split into a high
   nibble (bits 16-19) and a low nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9960
/* VMOV.F64 Dd, #imm: same split-nibble immediate as the SP form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9968
/* VFPv3 fixed-point conversion: encode the fraction-bits field as
   (srcsize - imm), split across bit 5 (lsb) and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9992
/* Single-precision 16-bit fixed-point conversion.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9999
/* Double-precision 16-bit fixed-point conversion.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10006
/* Single-precision 32-bit fixed-point conversion.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10013
/* Double-precision 32-bit fixed-point conversion.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10020 \f
10021 /* FPA instructions. Also in a logical order. */
10022
10023 static void
10024 do_fpa_cmp (void)
10025 {
10026 inst.instruction |= inst.operands[0].reg << 16;
10027 inst.instruction |= inst.operands[1].reg;
10028 }
10029
/* FPA LFM/SFM: load/store multiple floating registers.  Operand 1's
   imm is the register count (1-4), encoded in the CP_T_X/CP_T_Y bits.
   The "ea"/"fd" stack forms are emulated with explicit offsets since
   the hardware does not support true stacking.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:				 break;  /* Count 4 encodes as 0.  */
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in memory.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  /* Convert post-increment writeback to post-indexed form.  */
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10068 \f
10069 /* iWMMXt instructions: strictly in alphabetical order. */
10070
/* iWMMXt TANDC/TORC/TEXTRC-class: the only legal destination is r15
   (the flags); no further fields to encode.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10076
10077 static void
10078 do_iwmmxt_textrc (void)
10079 {
10080 inst.instruction |= inst.operands[0].reg << 12;
10081 inst.instruction |= inst.operands[1].imm;
10082 }
10083
10084 static void
10085 do_iwmmxt_textrm (void)
10086 {
10087 inst.instruction |= inst.operands[0].reg << 12;
10088 inst.instruction |= inst.operands[1].reg << 16;
10089 inst.instruction |= inst.operands[2].imm;
10090 }
10091
10092 static void
10093 do_iwmmxt_tinsr (void)
10094 {
10095 inst.instruction |= inst.operands[0].reg << 16;
10096 inst.instruction |= inst.operands[1].reg << 12;
10097 inst.instruction |= inst.operands[2].imm;
10098 }
10099
10100 static void
10101 do_iwmmxt_tmia (void)
10102 {
10103 inst.instruction |= inst.operands[0].reg << 5;
10104 inst.instruction |= inst.operands[1].reg;
10105 inst.instruction |= inst.operands[2].reg << 12;
10106 }
10107
10108 static void
10109 do_iwmmxt_waligni (void)
10110 {
10111 inst.instruction |= inst.operands[0].reg << 12;
10112 inst.instruction |= inst.operands[1].reg << 16;
10113 inst.instruction |= inst.operands[2].reg;
10114 inst.instruction |= inst.operands[3].imm << 20;
10115 }
10116
10117 static void
10118 do_iwmmxt_wmerge (void)
10119 {
10120 inst.instruction |= inst.operands[0].reg << 12;
10121 inst.instruction |= inst.operands[1].reg << 16;
10122 inst.instruction |= inst.operands[2].reg;
10123 inst.instruction |= inst.operands[3].imm << 21;
10124 }
10125
10126 static void
10127 do_iwmmxt_wmov (void)
10128 {
10129 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10130 inst.instruction |= inst.operands[0].reg << 12;
10131 inst.instruction |= inst.operands[1].reg << 16;
10132 inst.instruction |= inst.operands[1].reg;
10133 }
10134
/* iWMMXt WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword forms use a
   scale-by-2 coprocessor offset relocation, chosen per ISA state.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
10146
/* iWMMXt WLDRW/WSTRW: word form, also used for control registers.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10160
/* iWMMXt WLDRD/WSTRD: doubleword form.  On iWMMXt2 a register-offset
   address selects an alternative (unconditional) encoding that is
   assembled by hand here; otherwise the normal coprocessor address
   encoder is used.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite into the iWMMXt2 register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10183
10184 static void
10185 do_iwmmxt_wshufh (void)
10186 {
10187 inst.instruction |= inst.operands[0].reg << 12;
10188 inst.instruction |= inst.operands[1].reg << 16;
10189 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10190 inst.instruction |= (inst.operands[2].imm & 0x0f);
10191 }
10192
10193 static void
10194 do_iwmmxt_wzero (void)
10195 {
10196 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10197 inst.instruction |= inst.operands[0].reg;
10198 inst.instruction |= inst.operands[0].reg << 12;
10199 inst.instruction |= inst.operands[0].reg << 16;
10200 }
10201
/* Shift-style iWMMXt ops: three-register form, or (iWMMXt2 only) a
   register, register, #imm5 form.  A zero immediate is not encodable
   directly and is rewritten as an equivalent instruction.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation size (h/w/d variants);
	   rewrite #0 shifts into an encodable equivalent.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; imm5 is split with bit 4 in insn bit 8
       and bits 0-3 in insn bits 0-3.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10251 \f
10252 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10253 operations first, then control, shift, and load/store. */
10254
10255 /* Insns like "foo X,Y,Z". */
10256
10257 static void
10258 do_mav_triple (void)
10259 {
10260 inst.instruction |= inst.operands[0].reg << 16;
10261 inst.instruction |= inst.operands[1].reg;
10262 inst.instruction |= inst.operands[2].reg << 12;
10263 }
10264
10265 /* Insns like "foo W,X,Y,Z".
10266 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10267
10268 static void
10269 do_mav_quad (void)
10270 {
10271 inst.instruction |= inst.operands[0].reg << 5;
10272 inst.instruction |= inst.operands[1].reg << 12;
10273 inst.instruction |= inst.operands[2].reg << 16;
10274 inst.instruction |= inst.operands[3].reg;
10275 }
10276
10277 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* The DSPSC destination is implicit in the opcode; only the source
     MVDX register needs encoding, in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10283
10284 /* Maverick shift immediate instructions.
10285 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10286 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10287
10288 static void
10289 do_mav_shift (void)
10290 {
10291 int imm = inst.operands[2].imm;
10292
10293 inst.instruction |= inst.operands[0].reg << 12;
10294 inst.instruction |= inst.operands[1].reg << 16;
10295
10296 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10297 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10298 Bit 4 should be 0. */
10299 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10300
10301 inst.instruction |= imm;
10302 }
10303 \f
10304 /* XScale instructions. Also sorted arithmetic before move. */
10305
10306 /* Xscale multiply-accumulate (argument parse)
10307 MIAcc acc0,Rm,Rs
10308 MIAPHcc acc0,Rm,Rs
10309 MIAxycc acc0,Rm,Rs. */
10310
10311 static void
10312 do_xsc_mia (void)
10313 {
10314 inst.instruction |= inst.operands[1].reg;
10315 inst.instruction |= inst.operands[2].reg << 12;
10316 }
10317
10318 /* Xscale move-accumulator-register (argument parse)
10319
10320 MARcc acc0,RdLo,RdHi. */
10321
10322 static void
10323 do_xsc_mar (void)
10324 {
10325 inst.instruction |= inst.operands[1].reg << 12;
10326 inst.instruction |= inst.operands[2].reg << 16;
10327 }
10328
10329 /* Xscale move-register-accumulator (argument parse)
10330
10331 MRAcc RdLo,RdHi,acc0. */
10332
10333 static void
10334 do_xsc_mra (void)
10335 {
10336 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10337 inst.instruction |= inst.operands[0].reg << 12;
10338 inst.instruction |= inst.operands[1].reg << 16;
10339 }
10340 \f
10341 /* Encoding functions relevant only to Thumb. */
10342
10343 /* inst.operands[i] is a shifted-register operand; encode
10344 it into inst.instruction in the format used by Thumb32. */
10345
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  /* RRX is encoded as ROR with a zero shift amount.  */
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount degenerates to LSL #0 (no shift), while LSR/ASR
	 #32 are encoded with an immediate field of 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split: imm3 in bits 12-14, imm2 in
	 bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10377
10378
10379 /* inst.operands[i] was set up by parse_address. Encode it into a
10380 Thumb32 format load or store instruction. Reject forms that cannot
10381 be used with such instructions. If is_t is true, reject forms that
10382 cannot be used with a T instruction; if is_d is true, reject forms
10383 that cannot be used with a D instruction. If it is a store insn,
10384 reject PC in Rn. */
10385
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] — register offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm] or [Rn, #imm]! — immediate pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm — post-indexed form, writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10457
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each entry is X(mnemonic-tag, 16-bit opcode, 32-bit opcode); the
   X macro is redefined below to expand the same table into the enum
   of codes and the two opcode arrays.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                    \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by t16_32_codes (minus the offset).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcodes, same indexing.  THUMB_SETS_FLAGS tests the S bit
   (bit 20) of the 32-bit encoding.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10563
10564 /* Thumb instruction encoders, in alphabetical order. */
10565
10566 /* ADDW or SUBW. */
10567
10568 static void
10569 do_t_add_sub_w (void)
10570 {
10571 int Rd, Rn;
10572
10573 Rd = inst.operands[0].reg;
10574 Rn = inst.operands[1].reg;
10575
10576 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10577 is the SP-{plus,minus}-immediate form of the instruction. */
10578 if (Rn == REG_SP)
10579 constraint (Rd == REG_PC, BAD_PC);
10580 else
10581 reject_bad_reg (Rd);
10582
10583 inst.instruction |= (Rn << 16) | (Rd << 8);
10584 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10585 }
10586
10587 /* Parse an add or subtract instruction. We get here with inst.instruction
10588 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10589
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* With PC as destination this acts as a branch, so it must be the
     last instruction of any enclosing IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms can be narrow only outside an IT block;
	 non-setting forms only inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) immediate encoding.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     is permitted here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the classic Thumb-1
	 encodings are available.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10807
/* Thumb ADR: prefer a relaxable narrow encoding, else a wide
   ADD/SUB-from-PC, else a fixed narrow encoding.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For a defined Thumb function target, bias the addend by one so
     the computed address carries the Thumb bit.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
10847
10848 /* Arithmetic instructions for which there is just one 16-bit
10849 instruction encoding, and it allows only two low registers.
10850 For maximal compatibility with ARM syntax, we allow three register
10851 operands even when Thumb-32 instructions are not available, as long
10852 as the first two are identical. For instance, both "sbc r0,r1" and
10853 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The narrow two-register form requires Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10936
10937 /* Similarly, but for instructions where the arithmetic operation is
10938 commutative, so we can allow either of them to be different from
10939 the destination operand in a 16-bit instruction. For instance, all
10940 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10941 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The operation is commutative, so the narrow form works if
	     Rd matches either source.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11037
11038 static void
11039 do_t_bfc (void)
11040 {
11041 unsigned Rd;
11042 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11043 constraint (msb > 32, _("bit-field extends past end of register"));
11044 /* The instruction encoding stores the LSB and MSB,
11045 not the LSB and width. */
11046 Rd = inst.operands[0].reg;
11047 reject_bad_reg (Rd);
11048 inst.instruction |= Rd << 8;
11049 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11050 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11051 inst.instruction |= msb - 1;
11052 }
11053
11054 static void
11055 do_t_bfi (void)
11056 {
11057 int Rd, Rn;
11058 unsigned int msb;
11059
11060 Rd = inst.operands[0].reg;
11061 reject_bad_reg (Rd);
11062
11063 /* #0 in second position is alternative syntax for bfc, which is
11064 the same instruction but with REG_PC in the Rm field. */
11065 if (!inst.operands[1].isreg)
11066 Rn = REG_PC;
11067 else
11068 {
11069 Rn = inst.operands[1].reg;
11070 reject_bad_reg (Rn);
11071 }
11072
11073 msb = inst.operands[2].imm + inst.operands[3].imm;
11074 constraint (msb > 32, _("bit-field extends past end of register"));
11075 /* The instruction encoding stores the LSB and MSB,
11076 not the LSB and width. */
11077 inst.instruction |= Rd << 8;
11078 inst.instruction |= Rn << 16;
11079 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11080 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11081 inst.instruction |= msb - 1;
11082 }
11083
11084 static void
11085 do_t_bfx (void)
11086 {
11087 unsigned Rd, Rn;
11088
11089 Rd = inst.operands[0].reg;
11090 Rn = inst.operands[1].reg;
11091
11092 reject_bad_reg (Rd);
11093 reject_bad_reg (Rn);
11094
11095 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11096 _("bit-field extends past end of register"));
11097 inst.instruction |= Rd << 8;
11098 inst.instruction |= Rn << 16;
11099 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11100 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11101 inst.instruction |= inst.operands[3].imm - 1;
11102 }
11103
11104 /* ARM V5 Thumb BLX (argument parse)
11105 BLX <target_addr> which is BLX(1)
11106 BLX <Rm> which is BLX(2)
11107 Unfortunately, there are two different opcodes for this mnemonic.
11108 So, the insns[].value is not used, and the code here zaps values
11109 into inst.instruction.
11110
11111 ??? How to take advantage of the additional two bits of displacement
11112 available in Thumb32 mode? Need new relocation? */
11113
11114 static void
11115 do_t_blx (void)
11116 {
11117 set_it_insn_type_last ();
11118
11119 if (inst.operands[0].isreg)
11120 {
11121 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11122 /* We have a register, so this is BLX(2). */
11123 inst.instruction |= inst.operands[0].reg << 3;
11124 }
11125 else
11126 {
11127 /* No register. This must be BLX(1). */
11128 inst.instruction = 0xf000e800;
11129 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11130 }
11131 }
11132
11133 static void
11134 do_t_branch (void)
11135 {
11136 int opcode;
11137 int cond;
11138 bfd_reloc_code_real_type reloc;
11139
11140 cond = inst.cond;
11141 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11142
11143 if (in_it_block ())
11144 {
11145 /* Conditional branches inside IT blocks are encoded as unconditional
11146 branches. */
11147 cond = COND_ALWAYS;
11148 }
11149 else
11150 cond = inst.cond;
11151
11152 if (cond != COND_ALWAYS)
11153 opcode = T_MNEM_bcond;
11154 else
11155 opcode = inst.instruction;
11156
11157 if (unified_syntax
11158 && (inst.size_req == 4
11159 || (inst.size_req != 2
11160 && (inst.operands[0].hasreloc
11161 || inst.reloc.exp.X_op == O_constant))))
11162 {
11163 inst.instruction = THUMB_OP32(opcode);
11164 if (cond == COND_ALWAYS)
11165 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11166 else
11167 {
11168 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11169 _("selected architecture does not support "
11170 "wide conditional branch instruction"));
11171
11172 gas_assert (cond != 0xF);
11173 inst.instruction |= cond << 22;
11174 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11175 }
11176 }
11177 else
11178 {
11179 inst.instruction = THUMB_OP16(opcode);
11180 if (cond == COND_ALWAYS)
11181 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11182 else
11183 {
11184 inst.instruction |= cond << 8;
11185 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11186 }
11187 /* Allow section relaxation. */
11188 if (unified_syntax && inst.size_req != 2)
11189 inst.relax = opcode;
11190 }
11191 inst.reloc.type = reloc;
11192 inst.reloc.pc_rel = 1;
11193 }
11194
/* Actually do the work for Thumb state bkpt and hlt.  The only difference
   between the two is the maximum immediate allowed - which is passed in
   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These instructions execute regardless of condition flags, so a
     condition suffix is rejected rather than silently dropped.  */
  constraint (inst.cond != COND_ALWAYS,
              _("instruction is always unconditional"));
  /* The immediate operand is optional; when omitted, it encodes as zero.  */
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
                  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  /* Permitted anywhere in an IT block without affecting its state.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
11212
/* Encode Thumb HLT: the immediate is limited to 6 bits (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11218
/* Encode Thumb BKPT: the immediate is limited to 8 bits (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11224
/* Encode a Thumb BL-style branch using the 23-bit PC-relative
   BRANCH23 relocation.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if ( inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11252
/* Encode Thumb BX <Rm>: the register goes in bits 3-6.  */
static void
do_t_bx (void)
{
  /* BX must be the last instruction of an IT block, if inside one.  */
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11262
11263 static void
11264 do_t_bxj (void)
11265 {
11266 int Rm;
11267
11268 set_it_insn_type_last ();
11269 Rm = inst.operands[0].reg;
11270 reject_bad_reg (Rm);
11271 inst.instruction |= Rm << 16;
11272 }
11273
11274 static void
11275 do_t_clz (void)
11276 {
11277 unsigned Rd;
11278 unsigned Rm;
11279
11280 Rd = inst.operands[0].reg;
11281 Rm = inst.operands[1].reg;
11282
11283 reject_bad_reg (Rd);
11284 reject_bad_reg (Rm);
11285
11286 inst.instruction |= Rd << 8;
11287 inst.instruction |= Rm << 16;
11288 inst.instruction |= Rm;
11289 }
11290
/* Encode Thumb CSDB.  The opcode is fixed; only the IT-block
   restriction needs enforcing.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11296
/* Encode Thumb CPS: the mode immediate goes in the low bits.  */
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11303
/* Encode Thumb CPSIE/CPSID with an optional mode operand.  The 32-bit
   encoding is used for the two-operand form or an explicit .w, and only
   on v6 non-M-profile cores.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the interrupt enable/disable modifier bits over from the
         16-bit opcode template into the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
        inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
                  && (inst.operands[0].imm & 4),
                  _("selected processor does not support 'A' form "
                    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
                  _("Thumb does not support the 2-argument "
                    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11331
11332 /* THUMB CPY instruction (argument parse). */
11333
11334 static void
11335 do_t_cpy (void)
11336 {
11337 if (inst.size_req == 4)
11338 {
11339 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11340 inst.instruction |= inst.operands[0].reg << 8;
11341 inst.instruction |= inst.operands[1].reg;
11342 }
11343 else
11344 {
11345 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11346 inst.instruction |= (inst.operands[0].reg & 0x7);
11347 inst.instruction |= inst.operands[1].reg << 3;
11348 }
11349 }
11350
/* Encode Thumb CBZ/CBNZ (compare and branch on zero / non-zero).  */
static void
do_t_cbz (void)
{
  /* CBZ/CBNZ are not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Only a low register (r0-r7) may be tested.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
11360
/* Encode the Thumb DBG hint: the 4-bit option goes in the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11366
11367 static void
11368 do_t_div (void)
11369 {
11370 unsigned Rd, Rn, Rm;
11371
11372 Rd = inst.operands[0].reg;
11373 Rn = (inst.operands[1].present
11374 ? inst.operands[1].reg : Rd);
11375 Rm = inst.operands[2].reg;
11376
11377 reject_bad_reg (Rd);
11378 reject_bad_reg (Rn);
11379 reject_bad_reg (Rm);
11380
11381 inst.instruction |= Rd << 8;
11382 inst.instruction |= Rn << 16;
11383 inst.instruction |= Rm;
11384 }
11385
11386 static void
11387 do_t_hint (void)
11388 {
11389 if (unified_syntax && inst.size_req == 4)
11390 inst.instruction = THUMB_OP32 (inst.instruction);
11391 else
11392 inst.instruction = THUMB_OP16 (inst.instruction);
11393 }
11394
/* Encode the Thumb IT (if-then) instruction and record the resulting IT
   block state in now_it so the following instructions can be checked
   against it.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Remember the raw mask (with a sentinel bit) and condition for the
     IT-state machine.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the trailing 1 bit in the mask gives the IT block
         length; each case flips the "else" bits above it.  */
      if ((mask & 0x7) == 0)
        {
          /* No conversion needed.  */
          now_it.block_length = 1;
        }
      else if ((mask & 0x3) == 0)
        {
          mask ^= 0x8;
          now_it.block_length = 2;
        }
      else if ((mask & 0x1) == 0)
        {
          mask ^= 0xC;
          now_it.block_length = 3;
        }
      else
        {
          mask ^= 0xE;
          now_it.block_length = 4;
        }

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* Condition goes in bits 4-7.  */
  inst.instruction |= cond << 4;
}
11437
/* Helper function used for both push/pop and ldm/stm.  Encode a Thumb-2
   load/store multiple with base register BASE, register list MASK and
   the WRITEBACK flag, diagnosing UNPREDICTABLE register combinations.
   A single-register list is re-encoded as the equivalent LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
                   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
        {
          if (mask & (1 << 14))
            inst.error = _("LR and PC should not both be in register list");
          else
            /* Loading PC is a branch: must be last in any IT block.  */
            set_it_insn_type_last ();
        }
    }
  else
    {
      if (mask & (1 << 15))
        inst.error = _("PC not allowed in register list");
    }

  /* A power-of-two mask means exactly one register in the list.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
        {
          if (inst.instruction & (1 << 23))
            inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
          else
            inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
        }
      else
        {
          if (inst.instruction & (1 << 23))
            inst.instruction = 0x00800000; /* ia -> [base] */
          else
            inst.instruction = 0x00000c04; /* db -> [base, #-4] */
        }

      inst.instruction |= 0xf8400000;
      if (load)
        inst.instruction |= 0x00100000;

      /* Convert the mask into the register number for the Rt field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11501
/* Encode Thumb LDM/STM (and by extension push/pop), choosing a 16-bit
   form where one exists (low registers, or an SP base) and falling back
   to the Thumb-2 32-bit form otherwise.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));
  constraint (inst.operands[1].writeback,
              _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
          && inst.size_req != 4
          && !(inst.operands[1].imm & ~0xff))
        {
          mask = 1 << inst.operands[0].reg;

          if (inst.operands[0].reg <= 7)
            {
              /* Narrow STMIA requires writeback; narrow LDMIA requires
                 writeback exactly when the base is not in the list.  */
              if (inst.instruction == T_MNEM_stmia
                  ? inst.operands[0].writeback
                  : (inst.operands[0].writeback
                     == !(inst.operands[1].imm & mask)))
                {
                  if (inst.instruction == T_MNEM_stmia
                      && (inst.operands[1].imm & mask)
                      && (inst.operands[1].imm & (mask - 1)))
                    as_warn (_("value stored for r%d is UNKNOWN"),
                             inst.operands[0].reg);

                  inst.instruction = THUMB_OP16 (inst.instruction);
                  inst.instruction |= inst.operands[0].reg << 8;
                  inst.instruction |= inst.operands[1].imm;
                  narrow = TRUE;
                }
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
                {
                  /* This means 1 register in reg list one of 3 situations:
                     1. Instruction is stmia, but without writeback.
                     2. lmdia without writeback, but with Rn not in
                        reglist.
                     3. ldmia with writeback, but with Rn in reglist.
                     Case 3 is UNPREDICTABLE behaviour, so we handle
                     case 1 and 2 which can be converted into a 16-bit
                     str or ldr.  The SP cases are handled below.  */
                  unsigned long opcode;
                  /* First, record an error for Case 3.  */
                  if (inst.operands[1].imm & mask
                      && inst.operands[0].writeback)
                    inst.error =
                        _("having the base register in the register list when "
                          "using write back is UNPREDICTABLE");

                  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
                            : T_MNEM_ldr);
                  inst.instruction = THUMB_OP16 (opcode);
                  inst.instruction |= inst.operands[0].reg << 3;
                  inst.instruction |= (ffs (inst.operands[1].imm)-1);
                  narrow = TRUE;
                }
            }
          else if (inst.operands[0] .reg == REG_SP)
            {
              /* SP base: use PUSH/POP with writeback, or an SP-relative
                 STR/LDR for a single register without writeback.  */
              if (inst.operands[0].writeback)
                {
                  inst.instruction =
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
                                    ? T_MNEM_push : T_MNEM_pop);
                  inst.instruction |= inst.operands[1].imm;
                  narrow = TRUE;
                }
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
                {
                  inst.instruction =
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
                                    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
                  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
                  narrow = TRUE;
                }
            }
        }

      if (!narrow)
        {
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);

          encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
                                inst.operands[0].writeback);
        }
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only low-register LDMIA/STMIA.  */
      constraint (inst.operands[0].reg > 7
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
                  && inst.instruction != T_MNEM_stmia,
                  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
        {
          if (!inst.operands[0].writeback)
            as_warn (_("this instruction will write back the base register"));
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
            as_warn (_("value stored for r%d is UNKNOWN"),
                     inst.operands[0].reg);
        }
      else
        {
          if (!inst.operands[0].writeback
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will write back the base register"));
          else if (inst.operands[0].writeback
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will not write back the base register"));
        }

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11629
/* Encode Thumb-2 LDREX: only the plain [Rn, #imm] addressing mode is
   accepted, and the base may not be PC.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted
              || inst.operands[1].negative,
              BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Offset is resolved later as an unsigned 8-bit quantity.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11645
/* Encode Thumb-2 LDREXD.  The second destination register is optional;
   when omitted it defaults to the register after the first.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      /* r14 + 1 would be r15 (PC), which is not encodable here.  */
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed as first register "
                    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
11663
/* Encode a Thumb single-register load/store (LDR/STR and the byte,
   halfword and signed variants), selecting among the many 16-bit forms,
   literal-pool loads, and the Thumb-2 32-bit form.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* Loading PC is a branch: must be last in any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
        {
          /* Literal/immediate operand: try a MOV or literal-pool load.  */
          if (opcode <= 0xffff)
            inst.instruction = THUMB_OP32 (opcode);
          if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
            return;
        }
      if (inst.operands[1].isreg
          && !inst.operands[1].writeback
          && !inst.operands[1].shifted && !inst.operands[1].postind
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
          && opcode <= 0xffff
          && inst.size_req != 4)
        {
          /* Insn may have a 16-bit form.  */
          Rn = inst.operands[1].reg;
          if (inst.operands[1].immisreg)
            {
              inst.instruction = THUMB_OP16 (opcode);
              /* [Rn, Rik] */
              if (Rn <= 7 && inst.operands[1].imm <= 7)
                goto op16;
              else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
                reject_bad_reg (inst.operands[1].imm);
            }
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
                    && opcode != T_MNEM_ldrsb)
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
                   || (Rn == REG_SP && opcode == T_MNEM_str))
            {
              /* [Rn, #const] */
              if (Rn > 7)
                {
                  /* PC- or SP-relative forms use dedicated opcodes.  */
                  if (Rn == REG_PC)
                    {
                      if (inst.reloc.pc_rel)
                        opcode = T_MNEM_ldr_pc2;
                      else
                        opcode = T_MNEM_ldr_pc;
                    }
                  else
                    {
                      if (opcode == T_MNEM_ldr)
                        opcode = T_MNEM_ldr_sp;
                      else
                        opcode = T_MNEM_str_sp;
                    }
                  inst.instruction = inst.operands[0].reg << 8;
                }
              else
                {
                  inst.instruction = inst.operands[0].reg;
                  inst.instruction |= inst.operands[1].reg << 3;
                }
              inst.instruction |= THUMB_OP16 (opcode);
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
              else
                /* Let section relaxation widen it if the offset does
                   not fit the 16-bit form.  */
                inst.relax = opcode;
              return;
            }
        }
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
          && inst.operands[0].reg == REG_SP
          && inst.operands[1].writeback == 1
          && !inst.operands[1].immisreg)
        {
          if (no_cpu_selected ()
              || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
            as_warn (_("This instruction may be unpredictable "
                       "if executed on M-profile cores "
                       "with interrupts enabled."));
        }

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
        reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
                  && inst.operands[0].reg == inst.operands[1].reg,
                  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified (old Thumb) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
                  || inst.operands[1].postind || inst.operands[1].shifted
                  || inst.operands[1].negative,
                  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
              || inst.operands[1].shifted
              || inst.operands[1].writeback,
              _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
                  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
                  && !(inst.instruction & THUMB_LOAD_BIT),
                  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
                  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
        inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
        inst.instruction = T_OPCODE_LDR_SP;
      else
        inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
              _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset twins.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11850
/* Encode Thumb-2 LDRD/STRD.  The second transfer register is optional
   and defaults to the register after the first.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      /* r14 + 1 would be PC; r12 + 1 would be SP.  */
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
                  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
          || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
               "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
11873
/* Encode a Thumb-2 unprivileged load/store (LDRT/STRT and variants).  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11880
11881 static void
11882 do_t_mla (void)
11883 {
11884 unsigned Rd, Rn, Rm, Ra;
11885
11886 Rd = inst.operands[0].reg;
11887 Rn = inst.operands[1].reg;
11888 Rm = inst.operands[2].reg;
11889 Ra = inst.operands[3].reg;
11890
11891 reject_bad_reg (Rd);
11892 reject_bad_reg (Rn);
11893 reject_bad_reg (Rm);
11894 reject_bad_reg (Ra);
11895
11896 inst.instruction |= Rd << 8;
11897 inst.instruction |= Rn << 16;
11898 inst.instruction |= Rm;
11899 inst.instruction |= Ra << 12;
11900 }
11901
11902 static void
11903 do_t_mlal (void)
11904 {
11905 unsigned RdLo, RdHi, Rn, Rm;
11906
11907 RdLo = inst.operands[0].reg;
11908 RdHi = inst.operands[1].reg;
11909 Rn = inst.operands[2].reg;
11910 Rm = inst.operands[3].reg;
11911
11912 reject_bad_reg (RdLo);
11913 reject_bad_reg (RdHi);
11914 reject_bad_reg (Rn);
11915 reject_bad_reg (Rm);
11916
11917 inst.instruction |= RdLo << 12;
11918 inst.instruction |= RdHi << 8;
11919 inst.instruction |= Rn << 16;
11920 inst.instruction |= Rm;
11921 }
11922
/* Encode Thumb MOV/MOVS/CMP, covering immediates, plain registers and
   shifted-register operands, and selecting between 16- and 32-bit
   encodings (with relaxation where allowed).  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* MOV to PC is a branch: must be last in any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV places its destination in bits 8-11; CMP places its first
         operand in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
        narrow = opcode != T_MNEM_movs;
      else
        narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
          || inst.operands[1].shifted)
        narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
          && !inst.operands[1].shifted
          && Rn == REG_PC
          && Rm == REG_LR)
        {
          inst.instruction = T2_SUBS_PC_LR;
          return;
        }

      if (opcode == T_MNEM_cmp)
        {
          constraint (Rn == REG_PC, BAD_PC);
          if (narrow)
            {
              /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
                 but valid.  */
              warn_deprecated_sp (Rm);
              /* R15 was documented as a valid choice for Rm in ARMv6,
                 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
                 tools reject R15, so we do too.  */
              constraint (Rm == REG_PC, BAD_PC);
            }
          else
            reject_bad_reg (Rm);
        }
      else if (opcode == T_MNEM_mov
               || opcode == T_MNEM_movs)
        {
          if (inst.operands[1].isreg)
            {
              if (opcode == T_MNEM_movs)
                {
                  reject_bad_reg (Rn);
                  reject_bad_reg (Rm);
                }
              else if (narrow)
                {
                  /* This is mov.n.  */
                  if ((Rn == REG_SP || Rn == REG_PC)
                      && (Rm == REG_SP || Rm == REG_PC))
                    {
                      as_tsktsk (_("Use of r%u as a source register is "
                                   "deprecated when r%u is the destination "
                                   "register."), Rm, Rn);
                    }
                }
              else
                {
                  /* This is mov.w.  */
                  constraint (Rn == REG_PC, BAD_PC);
                  constraint (Rm == REG_PC, BAD_PC);
                  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
                    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
                }
            }
          else
            reject_bad_reg (Rn);
        }

      if (!inst.operands[1].isreg)
        {
          /* Immediate operand.  */
          if (!in_it_block () && opcode == T_MNEM_mov)
            narrow = 0;
          if (low_regs && narrow)
            {
              inst.instruction = THUMB_OP16 (opcode);
              inst.instruction |= Rn << 8;
              if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
                  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
                {
                  if (inst.size_req == 2)
                    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
                  else
                    inst.relax = opcode;
                }
            }
          else
            {
              constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
                          && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
                          THUMB1_RELOC_ONLY);

              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
              inst.instruction |= Rn << r0off;
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
            }
        }
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
               && (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs))
        {
          /* Register shifts are encoded as separate shift instructions.  */
          bfd_boolean flags = (inst.instruction == T_MNEM_movs);

          if (in_it_block ())
            narrow = !flags;
          else
            narrow = flags;

          if (inst.size_req == 4)
            narrow = FALSE;

          if (!low_regs || inst.operands[1].imm > 7)
            narrow = FALSE;

          /* Narrow shift instructions operate in place, so Rd == Rm.  */
          if (Rn != Rm)
            narrow = FALSE;

          switch (inst.operands[1].shift_kind)
            {
            case SHIFT_LSL:
              opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
              break;
            case SHIFT_ASR:
              opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
              break;
            case SHIFT_LSR:
              opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
              break;
            case SHIFT_ROR:
              opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
              break;
            default:
              abort ();
            }

          inst.instruction = opcode;
          if (narrow)
            {
              inst.instruction |= Rn;
              inst.instruction |= inst.operands[1].imm << 3;
            }
          else
            {
              if (flags)
                inst.instruction |= CONDS_BIT;

              inst.instruction |= Rn << 8;
              inst.instruction |= Rm << 16;
              inst.instruction |= inst.operands[1].imm;
            }
        }
      else if (!narrow)
        {
          /* Some mov with immediate shift have narrow variants.
             Register shifts are handled above.  */
          if (low_regs && inst.operands[1].shifted
              && (inst.instruction == T_MNEM_mov
                  || inst.instruction == T_MNEM_movs))
            {
              if (in_it_block ())
                narrow = (inst.instruction == T_MNEM_mov);
              else
                narrow = (inst.instruction == T_MNEM_movs);
            }

          if (narrow)
            {
              switch (inst.operands[1].shift_kind)
                {
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
                default: narrow = FALSE; break;
                }
            }

          if (narrow)
            {
              inst.instruction |= Rn;
              inst.instruction |= Rm << 3;
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
            }
          else
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= Rn << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
      else
        switch (inst.instruction)
          {
          case T_MNEM_mov:
            /* In v4t or v5t a move of two lowregs produces unpredictable
               results.  Don't allow this.  */
            if (low_regs)
              {
                constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
                            "MOV Rd, Rs with two low registers is not "
                            "permitted on this architecture");
                ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                                        arm_ext_v6);
              }

            inst.instruction = T_OPCODE_MOV_HR;
            inst.instruction |= (Rn & 0x8) << 4;
            inst.instruction |= (Rn & 0x7);
            inst.instruction |= Rm << 3;
            break;

          case T_MNEM_movs:
            /* We know we have low registers at this point.
               Generate LSLS Rd, Rs, #0.  */
            inst.instruction = T_OPCODE_LSL_I;
            inst.instruction |= Rn;
            inst.instruction |= Rm << 3;
            break;

          case T_MNEM_cmp:
            if (low_regs)
              {
                inst.instruction = T_OPCODE_CMP_LR;
                inst.instruction |= Rn;
                inst.instruction |= Rm << 3;
              }
            else
              {
                inst.instruction = T_OPCODE_CMP_HR;
                inst.instruction |= (Rn & 0x8) << 4;
                inst.instruction |= (Rn & 0x7);
                inst.instruction |= Rm << 3;
              }
            break;
          }
      return;
    }

  /* Non-unified (old Thumb) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
              _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
        {
          /* A move of two lowregs is encoded as ADD Rd, Rs, #0
             since a MOV instruction produces unpredictable results.  */
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_ADD_I3;
          else
            inst.instruction = T_OPCODE_CMP_LR;

          inst.instruction |= Rn;
          inst.instruction |= Rm << 3;
        }
      else
        {
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_MOV_HR;
          else
            inst.instruction = T_OPCODE_CMP_HR;
          do_t_cpy ();
        }
    }
  else
    {
      constraint (Rn > 7,
                  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12220
/* Encode Thumb-2 MOVW/MOVT with a 16-bit immediate, handling the
   :lower16:/:upper16: relocation operators.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT (top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Immediate is fully known now; scatter it into the imm4:i:imm3:imm8
         fields of the encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12253
/* Encode Thumb MVN/MVNS/TST/CMN/TEQ style two-register (or
   register-immediate) operations, choosing between 16- and 32-bit
   encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN places its operand in bits 8-11; the comparisons in 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
          || inst.instruction > 0xffff
          || inst.operands[1].shifted
          || Rn > 7 || Rm > 7)
        narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
               || inst.instruction == T_MNEM_tst)
        narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = !in_it_block ();
      else
        narrow = in_it_block ();

      if (!inst.operands[1].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rn << r0off;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (narrow)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= Rn;
              inst.instruction |= Rm << 3;
            }
          else
            {
              constraint (inst.operands[1].shifted
                          && inst.operands[1].immisreg,
                          _("shift must be constant"));
              if (inst.instruction < 0xffff)
                inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= Rn << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
    }
  else
    {
      /* Non-unified syntax: only the low-register 16-bit form exists.  */
      constraint (inst.instruction > 0xffff
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
                  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12333
/* Thumb-2 MRS: read a special purpose (or banked) register into Rd.  */

static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP-style forms are handled by their own encoder.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form.  The parsed register value packs several
	 encoding fields; unpack them into the instruction word.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      /* PSR-flags form.  */
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
	      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      /* Bits 16-19 are all-ones in this form.  */
      inst.instruction |= 0xf0000;
    }
}
12381
/* Thumb-2 MSR: write Rn to a special purpose (or banked) register.  */

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP-style forms are handled by their own encoder.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a parsed register value (banked register form)
     or a PSR flags mask.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag/field bits into their encoding positions.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12428
/* Thumb MUL/MULS.  With only two operands, the destination also acts
   as the second source.  */

static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Rd, Rn -> Rd, Rn, Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit encoding requires the destination to overlap one
	 source and all registers to be low.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12491
12492 static void
12493 do_t_mull (void)
12494 {
12495 unsigned RdLo, RdHi, Rn, Rm;
12496
12497 RdLo = inst.operands[0].reg;
12498 RdHi = inst.operands[1].reg;
12499 Rn = inst.operands[2].reg;
12500 Rm = inst.operands[3].reg;
12501
12502 reject_bad_reg (RdLo);
12503 reject_bad_reg (RdHi);
12504 reject_bad_reg (Rn);
12505 reject_bad_reg (Rm);
12506
12507 inst.instruction |= RdLo << 12;
12508 inst.instruction |= RdHi << 8;
12509 inst.instruction |= Rn << 16;
12510 inst.instruction |= Rm;
12511
12512 if (RdLo == RdHi)
12513 as_tsktsk (_("rdhi and rdlo must be different"));
12514 }
12515
/* Thumb NOP and NOP-style hint instructions.  */

static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hint values above 15 only fit the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Pre-Thumb-2 fallback encoding.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12548
/* Thumb NEG/NEGS: choose between the 16- and 32-bit encodings.  */

static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Flag-setting decides the default width inside/outside an IT
	 block; high registers or an explicit .w force the wide form.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Non-unified syntax only has the 16-bit, low-register form.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12589
12590 static void
12591 do_t_orn (void)
12592 {
12593 unsigned Rd, Rn;
12594
12595 Rd = inst.operands[0].reg;
12596 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12597
12598 reject_bad_reg (Rd);
12599 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12600 reject_bad_reg (Rn);
12601
12602 inst.instruction |= Rd << 8;
12603 inst.instruction |= Rn << 16;
12604
12605 if (!inst.operands[2].isreg)
12606 {
12607 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12608 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12609 }
12610 else
12611 {
12612 unsigned Rm;
12613
12614 Rm = inst.operands[2].reg;
12615 reject_bad_reg (Rm);
12616
12617 constraint (inst.operands[2].shifted
12618 && inst.operands[2].immisreg,
12619 _("shift must be constant"));
12620 encode_thumb32_shifted_operand (2);
12621 }
12622 }
12623
12624 static void
12625 do_t_pkhbt (void)
12626 {
12627 unsigned Rd, Rn, Rm;
12628
12629 Rd = inst.operands[0].reg;
12630 Rn = inst.operands[1].reg;
12631 Rm = inst.operands[2].reg;
12632
12633 reject_bad_reg (Rd);
12634 reject_bad_reg (Rn);
12635 reject_bad_reg (Rm);
12636
12637 inst.instruction |= Rd << 8;
12638 inst.instruction |= Rn << 16;
12639 inst.instruction |= Rm;
12640 if (inst.operands[3].present)
12641 {
12642 unsigned int val = inst.reloc.exp.X_add_number;
12643 constraint (inst.reloc.exp.X_op != O_constant,
12644 _("expression too complex"));
12645 inst.instruction |= (val & 0x1c) << 10;
12646 inst.instruction |= (val & 0x03) << 6;
12647 }
12648 }
12649
12650 static void
12651 do_t_pkhtb (void)
12652 {
12653 if (!inst.operands[3].present)
12654 {
12655 unsigned Rtmp;
12656
12657 inst.instruction &= ~0x00000020;
12658
12659 /* PR 10168. Swap the Rm and Rn registers. */
12660 Rtmp = inst.operands[1].reg;
12661 inst.operands[1].reg = inst.operands[2].reg;
12662 inst.operands[2].reg = Rtmp;
12663 }
12664 do_t_pkhbt ();
12665 }
12666
/* Encode the single address operand of a Thumb-2 preload-style
   instruction.  */

static void
do_t_pld (void)
{
  /* A register offset must pass the usual register restrictions.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12675
/* Thumb PUSH and POP: pick the narrowest encoding the register list
   allows.  */

static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Low registers only: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit encoding with
	 the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM form (base SP).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12708
12709 static void
12710 do_t_rbit (void)
12711 {
12712 unsigned Rd, Rm;
12713
12714 Rd = inst.operands[0].reg;
12715 Rm = inst.operands[1].reg;
12716
12717 reject_bad_reg (Rd);
12718 reject_bad_reg (Rm);
12719
12720 inst.instruction |= Rd << 8;
12721 inst.instruction |= Rm << 16;
12722 inst.instruction |= Rm;
12723 }
12724
12725 static void
12726 do_t_rev (void)
12727 {
12728 unsigned Rd, Rm;
12729
12730 Rd = inst.operands[0].reg;
12731 Rm = inst.operands[1].reg;
12732
12733 reject_bad_reg (Rd);
12734 reject_bad_reg (Rm);
12735
12736 if (Rd <= 7 && Rm <= 7
12737 && inst.size_req != 4)
12738 {
12739 inst.instruction = THUMB_OP16 (inst.instruction);
12740 inst.instruction |= Rd;
12741 inst.instruction |= Rm << 3;
12742 }
12743 else if (unified_syntax)
12744 {
12745 inst.instruction = THUMB_OP32 (inst.instruction);
12746 inst.instruction |= Rd << 8;
12747 inst.instruction |= Rm << 16;
12748 inst.instruction |= Rm;
12749 }
12750 else
12751 inst.error = BAD_HIREG;
12752 }
12753
12754 static void
12755 do_t_rrx (void)
12756 {
12757 unsigned Rd, Rm;
12758
12759 Rd = inst.operands[0].reg;
12760 Rm = inst.operands[1].reg;
12761
12762 reject_bad_reg (Rd);
12763 reject_bad_reg (Rm);
12764
12765 inst.instruction |= Rd << 8;
12766 inst.instruction |= Rm;
12767 }
12768
/* Thumb RSB/RSBS.  */

static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the 32-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* General immediate: rewrite the opcode and let the fixup
	     encode the modified-immediate value.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12823
/* Thumb SETEND.  Deprecated from ARMv8 onwards.  */

static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* SETEND is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* A non-zero endianness operand sets bit 3.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12835
/* Thumb shift instructions: ASR, LSL, LSR, ROR and their flag-setting
   variants, in both register and immediate forms.  */

static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the 16-bit encoding is usable.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit shift-by-register.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit shift-by-immediate is encoded as MOV/MOVS with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit shift-by-register.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit shift-by-immediate (ROR has no immediate form
		 here; it was excluded from the narrow path above).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12983
12984 static void
12985 do_t_simd (void)
12986 {
12987 unsigned Rd, Rn, Rm;
12988
12989 Rd = inst.operands[0].reg;
12990 Rn = inst.operands[1].reg;
12991 Rm = inst.operands[2].reg;
12992
12993 reject_bad_reg (Rd);
12994 reject_bad_reg (Rn);
12995 reject_bad_reg (Rm);
12996
12997 inst.instruction |= Rd << 8;
12998 inst.instruction |= Rn << 16;
12999 inst.instruction |= Rm;
13000 }
13001
13002 static void
13003 do_t_simd2 (void)
13004 {
13005 unsigned Rd, Rn, Rm;
13006
13007 Rd = inst.operands[0].reg;
13008 Rm = inst.operands[1].reg;
13009 Rn = inst.operands[2].reg;
13010
13011 reject_bad_reg (Rd);
13012 reject_bad_reg (Rn);
13013 reject_bad_reg (Rm);
13014
13015 inst.instruction |= Rd << 8;
13016 inst.instruction |= Rn << 16;
13017 inst.instruction |= Rm;
13018 }
13019
/* Thumb-2 SMC (secure monitor call).  */

static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  /* NOTE(review): only bits [3:0] of the value land in bits 19:16; the
     other two ORs appear to assume the immediate fits in 4 bits and
     there is no explicit range check here — confirm intended.  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13035
13036 static void
13037 do_t_hvc (void)
13038 {
13039 unsigned int value = inst.reloc.exp.X_add_number;
13040
13041 inst.reloc.type = BFD_RELOC_UNUSED;
13042 inst.instruction |= (value & 0x0fff);
13043 inst.instruction |= (value & 0xf000) << 4;
13044 }
13045
/* Common encoder for Thumb-2 SSAT (bias == 1) and USAT (bias == 0).
   The saturate position operand is encoded as imm - bias.  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the shift amount across the two immediate fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13083
/* Thumb-2 SSAT: the saturate position is encoded biased by one.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13089
13090 static void
13091 do_t_ssat16 (void)
13092 {
13093 unsigned Rd, Rn;
13094
13095 Rd = inst.operands[0].reg;
13096 Rn = inst.operands[2].reg;
13097
13098 reject_bad_reg (Rd);
13099 reject_bad_reg (Rn);
13100
13101 inst.instruction |= Rd << 8;
13102 inst.instruction |= inst.operands[1].imm - 1;
13103 inst.instruction |= Rn << 16;
13104 }
13105
13106 static void
13107 do_t_strex (void)
13108 {
13109 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13110 || inst.operands[2].postind || inst.operands[2].writeback
13111 || inst.operands[2].immisreg || inst.operands[2].shifted
13112 || inst.operands[2].negative,
13113 BAD_ADDR_MODE);
13114
13115 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13116
13117 inst.instruction |= inst.operands[0].reg << 8;
13118 inst.instruction |= inst.operands[1].reg << 12;
13119 inst.instruction |= inst.operands[2].reg << 16;
13120 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
13121 }
13122
13123 static void
13124 do_t_strexd (void)
13125 {
13126 if (!inst.operands[2].present)
13127 inst.operands[2].reg = inst.operands[1].reg + 1;
13128
13129 constraint (inst.operands[0].reg == inst.operands[1].reg
13130 || inst.operands[0].reg == inst.operands[2].reg
13131 || inst.operands[0].reg == inst.operands[3].reg,
13132 BAD_OVERLAP);
13133
13134 inst.instruction |= inst.operands[0].reg;
13135 inst.instruction |= inst.operands[1].reg << 12;
13136 inst.instruction |= inst.operands[2].reg << 8;
13137 inst.instruction |= inst.operands[3].reg << 16;
13138 }
13139
13140 static void
13141 do_t_sxtah (void)
13142 {
13143 unsigned Rd, Rn, Rm;
13144
13145 Rd = inst.operands[0].reg;
13146 Rn = inst.operands[1].reg;
13147 Rm = inst.operands[2].reg;
13148
13149 reject_bad_reg (Rd);
13150 reject_bad_reg (Rn);
13151 reject_bad_reg (Rm);
13152
13153 inst.instruction |= Rd << 8;
13154 inst.instruction |= Rn << 16;
13155 inst.instruction |= Rm;
13156 inst.instruction |= inst.operands[3].imm << 4;
13157 }
13158
/* Thumb extend-style encoding: the 16-bit form is used when both
   registers are low and the rotation is absent or zero; otherwise the
   32-bit form carries the rotation at bit 4.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation field.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13194
/* Thumb SWI/SVC: the immediate is applied via a dedicated reloc.  */

static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13200
/* Thumb-2 table branch (TBB/TBH).  Bit 4 of the opcode distinguishes
   the halfword ("shifted index") form.  */

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* From ARMv8 onwards SP is permitted as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13223
13224 static void
13225 do_t_udf (void)
13226 {
13227 if (!inst.operands[0].present)
13228 inst.operands[0].imm = 0;
13229
13230 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13231 {
13232 constraint (inst.size_req == 2,
13233 _("immediate value out of range"));
13234 inst.instruction = THUMB_OP32 (inst.instruction);
13235 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13236 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13237 }
13238 else
13239 {
13240 inst.instruction = THUMB_OP16 (inst.instruction);
13241 inst.instruction |= inst.operands[0].imm;
13242 }
13243
13244 set_it_insn_type (NEUTRAL_IT_INSN);
13245 }
13246
13247
/* Thumb-2 USAT: the saturate position is encoded unbiased.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13253
13254 static void
13255 do_t_usat16 (void)
13256 {
13257 unsigned Rd, Rn;
13258
13259 Rd = inst.operands[0].reg;
13260 Rn = inst.operands[2].reg;
13261
13262 reject_bad_reg (Rd);
13263 reject_bad_reg (Rn);
13264
13265 inst.instruction |= Rd << 8;
13266 inst.instruction |= inst.operands[1].imm;
13267 inst.instruction |= Rn << 16;
13268 }
13269
13270 /* Neon instruction encoder helpers. */
13271
13272 /* Encodings for the different types for various Neon opcodes. */
13273
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon encoding table: the three alternative base
   encodings an overloaded mnemonic can map to.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13283
/* Map overloaded Neon opcodes to their respective encodings.  The
   three columns correspond to the integer, float_or_poly and
   scalar_or_imm members of struct neon_tab_entry; N_INV marks a
   combination that does not exist.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13361
/* Mnemonic indices (N_MNEM_<opc>) generated from the table above; they
   index neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, one entry per enum neon_opc value.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13375
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
/* Each accessor selects one column of neon_enc_tab, keyed by the low
   28 bits of the current opcode value.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE/FPV8 variants preserve high bits of the original
   opcode value in addition to the table lookup.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13392
/* Replace the pseudo-opcode in INST.instruction with the requested real
   encoding variant (INTEGER, FLOAT, SINGLE, DOUBLE, ...) and mark the
   instruction as Neon so that check_neon_suffixes accepts it.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)
13400
/* Diagnose a Neon type suffix (e.g. ".s8") that was parsed for an
   instruction which never went through a Neon encoding path.  Must be
   expanded in a context where `inst' is visible and `return' is legal.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
13411
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     H - VFP S<n> register holding a half precision value
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
13486
/* S2/S3/S4 paste an operand tuple into a single NS_* identifier, e.g.
   X(3, (D, D, D), DOUBLE) expands to NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape in NEON_SHAPE_DEF, plus NS_NULL meaning
   "no shape matched" / "no shape given".  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13503
/* Broad classification of a shape: which register class (and width) it
   predominantly operates on.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13521
/* One element kind per mnemonic character used in NEON_SHAPE_DEF
   (H/F/D/Q/I/S/R/L, in that order).  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  Immediates (SE_I) and register lists (SE_L)
   have no single register width and are recorded as zero.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L.  */
};
13546
/* Full description of a shape: its operand count and the element kind of
   each operand.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Here S2/S3/S4 expand a shape tuple to its element initializer list.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13568
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following modifier bits deliberately reuse the low type-bit values;
     they are only meaningful on an operand whose N_EQK bit is set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13613
/* All the modifier bits that may accompany N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groups of permitted types, used as arguments to
   neon_check_type.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13629
13630 /* Select a "shape" for the current instruction (describing register types or
13631 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13632 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13633 function of operand parsing, so this function doesn't need to be called.
13634 Shapes should be listed in order of decreasing length. */
13635
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches every parsed
     operand; the variadic list is terminated by NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13772
13773 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13774 means the Q bit should be set). */
13775
13776 static int
13777 neon_quad (enum neon_shape shape)
13778 {
13779 return neon_shape_class[shape] == SC_QUAD;
13780 }
13781
/* Apply the modifier bits that accompany an N_EQK constraint to a type and
   size derived from the key operand, updating *G_TYPE and *G_SIZE in
   place.  TYPEBITS without N_EQK leaves both untouched.  */
static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      /* N_HLF/N_DBL scale the size; at most one applies.  */
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      /* At most one of the remaining bits overrides the type.  */
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
13806
13807 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13808 operand type, i.e. the single type specified in a Neon instruction when it
13809 is the only one given. */
13810
static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  /* Start from the key type and apply THISARG's modifier bits to it.  */
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for operands constrained to the key.  */
  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
13822
13823 /* Convert Neon type and size into compact bitmask representation. */
13824
/* Map a (type, size) pair onto its single-bit neon_type_mask value, or
   N_UTYP (0) for any combination with no corresponding bit.  */
static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  return N_UTYP;
}
13899
13900 /* Convert compact Neon bitmask type representation to a type and size. Only
13901 handles the case where a single bit is set in the mask. */
13902
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* An N_EQK mask describes a constraint, not a concrete type; the low
     bits would be modifier flags, so decoding them as a type is invalid.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element size from whichever size group the bit falls in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* And the element type from the type group.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13938
13939 /* Modify a bitmask of allowed types. This is only needed for type
13940 relaxation. */
13941
13942 static unsigned
13943 modify_types_allowed (unsigned allowed, unsigned mods)
13944 {
13945 unsigned size;
13946 enum neon_el_type type;
13947 unsigned destmask;
13948 int i;
13949
13950 destmask = 0;
13951
13952 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13953 {
13954 if (el_type_of_type_chk (&type, &size,
13955 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13956 {
13957 neon_modify_type_size (mods, &type, &size);
13958 destmask |= type_chk_of_el_type (type, size);
13959 }
13960 }
13961
13962 return destmask;
13963 }
13964
13965 /* Check type and return type classification.
13966 The manual states (paraphrase): If one datatype is given, it indicates the
13967 type given in:
13968 - the second operand, if there is one
13969 - the operand, if there is no second operand
13970 - the result, if there are no operands.
13971 This isn't quite good enough though, so we use a concept of a "key" datatype
13972 which is set on a per-instruction basis, which is the one which matters when
13973 only one data type is written.
13974 Note: this function has side-effects (e.g. filling in missing operands). All
13975 Neon instructions should call it before performing bit encoding. */
13976
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers test .type != NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A mnemonic suffix and per-operand suffixes are mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type; pass 1 checks every operand
     against its constraint (which may depend on the key).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14174
14175 /* Neon-style VFP instruction forwarding. */
14176
14177 /* Thumb VFP instructions have 0xE in the condition field. */
14178
14179 static void
14180 do_vfp_cond_or_thumb (void)
14181 {
14182 inst.is_neon = 1;
14183
14184 if (thumb_mode)
14185 inst.instruction |= 0xe0000000;
14186 else
14187 inst.instruction |= inst.cond << 28;
14188 }
14189
14190 /* Look up and encode a simple mnemonic, for use as a helper function for the
14191 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14192 etc. It is assumed that operand parsing has already been done, and that the
14193 operands are in the form expected by the given opcode (this isn't necessarily
14194 the same as the form in which they were parsed, hence some massaging must
14195 take place before this function is called).
14196 Checks current arch version against that in the looked-up opcode. */
14197
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* OPNAME must name an entry in the main opcode hash table.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  /* Reject the instruction if the current CPU does not support it in the
     current instruction set (Thumb or ARM).  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM mode: the condition lives in the top nibble.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14225
14226 static void
14227 do_vfp_nsyn_add_sub (enum neon_shape rs)
14228 {
14229 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14230
14231 if (rs == NS_FFF || rs == NS_HHH)
14232 {
14233 if (is_add)
14234 do_vfp_nsyn_opcode ("fadds");
14235 else
14236 do_vfp_nsyn_opcode ("fsubs");
14237
14238 /* ARMv8.2 fp16 instruction. */
14239 if (rs == NS_HHH)
14240 do_scalar_fp16_v82_encode ();
14241 }
14242 else
14243 {
14244 if (is_add)
14245 do_vfp_nsyn_opcode ("faddd");
14246 else
14247 do_vfp_nsyn_opcode ("fsubd");
14248 }
14249 }
14250
14251 /* Check operand types to see if this is a VFP instruction, and if so call
14252 PFN (). */
14253
static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Match the operands against the VFP shapes for an ARGS-operand
     floating-point instruction.  */
  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
			    N_F_ALL | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      /* The types check out as VFP: hand off to the encoder.  */
      pfn (rs);
      return SUCCESS;
    }

  /* Not VFP after all: clear any error so Neon handling can proceed.  */
  inst.error = NULL;
  return FAIL;
}
14286
14287 static void
14288 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14289 {
14290 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14291
14292 if (rs == NS_FFF || rs == NS_HHH)
14293 {
14294 if (is_mla)
14295 do_vfp_nsyn_opcode ("fmacs");
14296 else
14297 do_vfp_nsyn_opcode ("fnmacs");
14298
14299 /* ARMv8.2 fp16 instruction. */
14300 if (rs == NS_HHH)
14301 do_scalar_fp16_v82_encode ();
14302 }
14303 else
14304 {
14305 if (is_mla)
14306 do_vfp_nsyn_opcode ("fmacd");
14307 else
14308 do_vfp_nsyn_opcode ("fnmacd");
14309 }
14310 }
14311
14312 static void
14313 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14314 {
14315 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14316
14317 if (rs == NS_FFF || rs == NS_HHH)
14318 {
14319 if (is_fma)
14320 do_vfp_nsyn_opcode ("ffmas");
14321 else
14322 do_vfp_nsyn_opcode ("ffnmas");
14323
14324 /* ARMv8.2 fp16 instruction. */
14325 if (rs == NS_HHH)
14326 do_scalar_fp16_v82_encode ();
14327 }
14328 else
14329 {
14330 if (is_fma)
14331 do_vfp_nsyn_opcode ("ffmad");
14332 else
14333 do_vfp_nsyn_opcode ("ffnmad");
14334 }
14335 }
14336
14337 static void
14338 do_vfp_nsyn_mul (enum neon_shape rs)
14339 {
14340 if (rs == NS_FFF || rs == NS_HHH)
14341 {
14342 do_vfp_nsyn_opcode ("fmuls");
14343
14344 /* ARMv8.2 fp16 instruction. */
14345 if (rs == NS_HHH)
14346 do_scalar_fp16_v82_encode ();
14347 }
14348 else
14349 do_vfp_nsyn_opcode ("fmuld");
14350 }
14351
14352 static void
14353 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14354 {
14355 int is_neg = (inst.instruction & 0x80) != 0;
14356 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14357
14358 if (rs == NS_FF || rs == NS_HH)
14359 {
14360 if (is_neg)
14361 do_vfp_nsyn_opcode ("fnegs");
14362 else
14363 do_vfp_nsyn_opcode ("fabss");
14364
14365 /* ARMv8.2 fp16 instruction. */
14366 if (rs == NS_HH)
14367 do_scalar_fp16_v82_encode ();
14368 }
14369 else
14370 {
14371 if (is_neg)
14372 do_vfp_nsyn_opcode ("fnegd");
14373 else
14374 do_vfp_nsyn_opcode ("fabsd");
14375 }
14376 }
14377
14378 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14379 insns belong to Neon, and are handled elsewhere. */
14380
14381 static void
14382 do_vfp_nsyn_ldm_stm (int is_dbmode)
14383 {
14384 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14385 if (is_ldm)
14386 {
14387 if (is_dbmode)
14388 do_vfp_nsyn_opcode ("fldmdbs");
14389 else
14390 do_vfp_nsyn_opcode ("fldmias");
14391 }
14392 else
14393 {
14394 if (is_dbmode)
14395 do_vfp_nsyn_opcode ("fstmdbs");
14396 else
14397 do_vfp_nsyn_opcode ("fstmias");
14398 }
14399 }
14400
14401 static void
14402 do_vfp_nsyn_sqrt (void)
14403 {
14404 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14405 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14406
14407 if (rs == NS_FF || rs == NS_HH)
14408 {
14409 do_vfp_nsyn_opcode ("fsqrts");
14410
14411 /* ARMv8.2 fp16 instruction. */
14412 if (rs == NS_HH)
14413 do_scalar_fp16_v82_encode ();
14414 }
14415 else
14416 do_vfp_nsyn_opcode ("fsqrtd");
14417 }
14418
14419 static void
14420 do_vfp_nsyn_div (void)
14421 {
14422 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14423 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14424 N_F_ALL | N_KEY | N_VFP);
14425
14426 if (rs == NS_FFF || rs == NS_HHH)
14427 {
14428 do_vfp_nsyn_opcode ("fdivs");
14429
14430 /* ARMv8.2 fp16 instruction. */
14431 if (rs == NS_HHH)
14432 do_scalar_fp16_v82_encode ();
14433 }
14434 else
14435 do_vfp_nsyn_opcode ("fdivd");
14436 }
14437
14438 static void
14439 do_vfp_nsyn_nmul (void)
14440 {
14441 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14442 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14443 N_F_ALL | N_KEY | N_VFP);
14444
14445 if (rs == NS_FFF || rs == NS_HHH)
14446 {
14447 NEON_ENCODE (SINGLE, inst);
14448 do_vfp_sp_dyadic ();
14449
14450 /* ARMv8.2 fp16 instruction. */
14451 if (rs == NS_HHH)
14452 do_scalar_fp16_v82_encode ();
14453 }
14454 else
14455 {
14456 NEON_ENCODE (DOUBLE, inst);
14457 do_vfp_dp_rd_rn_rm ();
14458 }
14459 do_vfp_cond_or_thumb ();
14460
14461 }
14462
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: vcmp Sd/Dd, Sm/Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with zero: vcmp Sd/Dd, #0.  Rewrite the pseudo-opcode to
	 its "z" counterpart before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14517
/* Shift the operand list down one slot and synthesize a writeback SP base
   operand in slot 0, turning a VPUSH/VPOP operand list into the form the
   fldm/fstm encoders expect.  */
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
14528
14529 static void
14530 do_vfp_nsyn_push (void)
14531 {
14532 nsyn_insert_sp ();
14533
14534 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14535 _("register list must contain at least 1 and at most 16 "
14536 "registers"));
14537
14538 if (inst.operands[1].issingle)
14539 do_vfp_nsyn_opcode ("fstmdbs");
14540 else
14541 do_vfp_nsyn_opcode ("fstmdbd");
14542 }
14543
14544 static void
14545 do_vfp_nsyn_pop (void)
14546 {
14547 nsyn_insert_sp ();
14548
14549 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14550 _("register list must contain at least 1 and at most 16 "
14551 "registers"));
14552
14553 if (inst.operands[1].issingle)
14554 do_vfp_nsyn_opcode ("fldmias");
14555 else
14556 do_vfp_nsyn_opcode ("fldmiad");
14557 }
14558
14559 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14560 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14561
14562 static void
14563 neon_dp_fixup (struct arm_it* insn)
14564 {
14565 unsigned int i = insn->instruction;
14566 insn->is_neon = 1;
14567
14568 if (thumb_mode)
14569 {
14570 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14571 if (i & (1 << 24))
14572 i |= 1 << 28;
14573
14574 i &= ~(1 << 24);
14575
14576 i |= 0xef000000;
14577 }
14578 else
14579 i |= 0xf2000000;
14580
14581 insn->instruction = i;
14582 }
14583
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based position of the lowest set bit, so a power of
     two 1 << n yields n + 1; subtracting 4 maps 8..64 onto 0..3.  */
  unsigned lowbit = ffs (x);

  return lowbit - 4;
}
14592
/* Split a (5-bit) Neon register number into the low four bits and the
   high bit, as required by the D/N/M register fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14595
14596 /* Encode insns with bit pattern:
14597
14598 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14599 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14600
14601 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14602 different meaning for some instruction. */
14603
14604 static void
14605 neon_three_same (int isquad, int ubit, int size)
14606 {
14607 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14608 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14609 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14610 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14611 inst.instruction |= LOW4 (inst.operands[2].reg);
14612 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14613 inst.instruction |= (isquad != 0) << 6;
14614 inst.instruction |= (ubit != 0) << 24;
14615 if (size != -1)
14616 inst.instruction |= neon_logbits (size) << 20;
14617
14618 neon_dp_fixup (&inst);
14619 }
14620
14621 /* Encode instructions of the form:
14622
14623 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14624 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14625
14626 Don't write size if SIZE == -1. */
14627
14628 static void
14629 neon_two_same (int qbit, int ubit, int size)
14630 {
14631 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14632 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14633 inst.instruction |= LOW4 (inst.operands[1].reg);
14634 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14635 inst.instruction |= (qbit != 0) << 6;
14636 inst.instruction |= (ubit != 0) << 24;
14637
14638 if (size != -1)
14639 inst.instruction |= neon_logbits (size) << 18;
14640
14641 neon_dp_fixup (&inst);
14642 }
14643
14644 /* Neon instruction encoders, in approximate order of appearance. */
14645
14646 static void
14647 do_neon_dyadic_i_su (void)
14648 {
14649 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14650 struct neon_type_el et = neon_check_type (3, rs,
14651 N_EQK, N_EQK, N_SU_32 | N_KEY);
14652 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14653 }
14654
14655 static void
14656 do_neon_dyadic_i64_su (void)
14657 {
14658 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14659 struct neon_type_el et = neon_check_type (3, rs,
14660 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14661 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14662 }
14663
14664 static void
14665 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14666 unsigned immbits)
14667 {
14668 unsigned size = et.size >> 3;
14669 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14670 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14671 inst.instruction |= LOW4 (inst.operands[1].reg);
14672 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14673 inst.instruction |= (isquad != 0) << 6;
14674 inst.instruction |= immbits << 16;
14675 inst.instruction |= (size >> 3) << 7;
14676 inst.instruction |= (size & 0x7) << 19;
14677 if (write_ubit)
14678 inst.instruction |= (uval != 0) << 24;
14679
14680 neon_dp_fixup (&inst);
14681 }
14682
14683 static void
14684 do_neon_shl_imm (void)
14685 {
14686 if (!inst.operands[2].isreg)
14687 {
14688 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14689 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14690 int imm = inst.operands[2].imm;
14691
14692 constraint (imm < 0 || (unsigned)imm >= et.size,
14693 _("immediate out of range for shift"));
14694 NEON_ENCODE (IMMED, inst);
14695 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14696 }
14697 else
14698 {
14699 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14700 struct neon_type_el et = neon_check_type (3, rs,
14701 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14702 unsigned int tmp;
14703
14704 /* VSHL/VQSHL 3-register variants have syntax such as:
14705 vshl.xx Dd, Dm, Dn
14706 whereas other 3-register operations encoded by neon_three_same have
14707 syntax like:
14708 vadd.xx Dd, Dn, Dm
14709 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14710 here. */
14711 tmp = inst.operands[2].reg;
14712 inst.operands[2].reg = inst.operands[1].reg;
14713 inst.operands[1].reg = tmp;
14714 NEON_ENCODE (INTEGER, inst);
14715 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14716 }
14717 }
14718
14719 static void
14720 do_neon_qshl_imm (void)
14721 {
14722 if (!inst.operands[2].isreg)
14723 {
14724 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14725 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14726 int imm = inst.operands[2].imm;
14727
14728 constraint (imm < 0 || (unsigned)imm >= et.size,
14729 _("immediate out of range for shift"));
14730 NEON_ENCODE (IMMED, inst);
14731 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14732 }
14733 else
14734 {
14735 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14736 struct neon_type_el et = neon_check_type (3, rs,
14737 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14738 unsigned int tmp;
14739
14740 /* See note in do_neon_shl_imm. */
14741 tmp = inst.operands[2].reg;
14742 inst.operands[2].reg = inst.operands[1].reg;
14743 inst.operands[1].reg = tmp;
14744 NEON_ENCODE (INTEGER, inst);
14745 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14746 }
14747 }
14748
14749 static void
14750 do_neon_rshl (void)
14751 {
14752 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14753 struct neon_type_el et = neon_check_type (3, rs,
14754 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14755 unsigned int tmp;
14756
14757 tmp = inst.operands[2].reg;
14758 inst.operands[2].reg = inst.operands[1].reg;
14759 inst.operands[1].reg = tmp;
14760 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14761 }
14762
14763 static int
14764 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14765 {
14766 /* Handle .I8 pseudo-instructions. */
14767 if (size == 8)
14768 {
14769 /* Unfortunately, this will make everything apart from zero out-of-range.
14770 FIXME is this the intended semantics? There doesn't seem much point in
14771 accepting .I8 if so. */
14772 immediate |= immediate << 8;
14773 size = 16;
14774 }
14775
14776 if (size >= 32)
14777 {
14778 if (immediate == (immediate & 0x000000ff))
14779 {
14780 *immbits = immediate;
14781 return 0x1;
14782 }
14783 else if (immediate == (immediate & 0x0000ff00))
14784 {
14785 *immbits = immediate >> 8;
14786 return 0x3;
14787 }
14788 else if (immediate == (immediate & 0x00ff0000))
14789 {
14790 *immbits = immediate >> 16;
14791 return 0x5;
14792 }
14793 else if (immediate == (immediate & 0xff000000))
14794 {
14795 *immbits = immediate >> 24;
14796 return 0x7;
14797 }
14798 if ((immediate & 0xffff) != (immediate >> 16))
14799 goto bad_immediate;
14800 immediate &= 0xffff;
14801 }
14802
14803 if (immediate == (immediate & 0x000000ff))
14804 {
14805 *immbits = immediate;
14806 return 0x9;
14807 }
14808 else if (immediate == (immediate & 0x0000ff00))
14809 {
14810 *immbits = immediate >> 8;
14811 return 0xb;
14812 }
14813
14814 bad_immediate:
14815 first_error (_("immediate value out of range"));
14816 return FAIL;
14817 }
14818
/* Encode Neon bitwise-logic instructions.  The register-register forms go
   through neon_three_same; the immediate forms (VBIC/VORR, plus the VAND/
   VORN pseudo-instructions) are encoded via a cmode/immbits pair computed
   by neon_cmode_for_logic_imm.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Plain 3-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "vop Rd, Rn, #imm" (three_ops_form) or the
	 two-operand "vop Rd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14904
14905 static void
14906 do_neon_bitfield (void)
14907 {
14908 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14909 neon_check_type (3, rs, N_IGNORE_TYPE);
14910 neon_three_same (neon_quad (rs), 0, -1);
14911 }
14912
14913 static void
14914 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14915 unsigned destbits)
14916 {
14917 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14918 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14919 types | N_KEY);
14920 if (et.type == NT_float)
14921 {
14922 NEON_ENCODE (FLOAT, inst);
14923 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14924 }
14925 else
14926 {
14927 NEON_ENCODE (INTEGER, inst);
14928 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14929 }
14930 }
14931
/* Dyadic op on signed/unsigned integer or float types up to 32 bits
   (N_SUF_32); the U bit is set for unsigned element types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14937
/* As do_neon_dyadic_if_su, for D-register-only instructions.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14945
/* Dyadic op on integer or float types up to 32 bits (N_IF_32), never
   setting the U bit.  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14953
/* Flags for vfp_or_neon_is_neon, selecting which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/fix up the condition-code field.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 FPU extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon FPU extension.  */
};
14960
14961 /* Call this function if an instruction which may have belonged to the VFP or
14962 Neon instruction sets, but turned out to be a Neon instruction (due to the
14963 operand types involved, etc.). We have to check and/or fix-up a couple of
14964 things:
14965
14966 - Make sure the user hasn't attempted to make a Neon instruction
14967 conditional.
14968 - Alter the value in the condition code field if necessary.
14969 - Make sure that the arch supports Neon instructions.
14970
14971 Which of these operations take place depends on bits from enum
14972 vfp_or_neon_is_neon_bits.
14973
14974 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14975 current instruction's condition is COND_ALWAYS, the condition field is
14976 changed to inst.uncond_value. This is necessary because instructions shared
14977 between VFP and Neon may be conditional for the VFP variants only, and the
14978 unconditional Neon version must have, e.g., 0xF in the condition field. */
14979
14980 static int
14981 vfp_or_neon_is_neon (unsigned check)
14982 {
14983 /* Conditions are always legal in Thumb mode (IT blocks). */
14984 if (!thumb_mode && (check & NEON_CHECK_CC))
14985 {
14986 if (inst.cond != COND_ALWAYS)
14987 {
14988 first_error (_(BAD_COND));
14989 return FAIL;
14990 }
14991 if (inst.uncond_value != -1)
14992 inst.instruction |= inst.uncond_value << 28;
14993 }
14994
14995 if ((check & NEON_CHECK_ARCH)
14996 && !mark_feature_used (&fpu_neon_ext_v1))
14997 {
14998 first_error (_(BAD_FPU));
14999 return FAIL;
15000 }
15001
15002 if ((check & NEON_CHECK_ARCH8)
15003 && !mark_feature_used (&fpu_neon_ext_armv8))
15004 {
15005 first_error (_(BAD_FPU));
15006 return FAIL;
15007 }
15008
15009 return SUCCESS;
15010 }
15011
/* Encode integer add/subtract.  The VFP form (via do_vfp_nsyn_add_sub) is
   tried first; otherwise the Neon three-same form is used.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
15025
15026 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15027 result to be:
15028 V<op> A,B (A is operand 0, B is operand 2)
15029 to mean:
15030 V<op> A,B,A
15031 not:
15032 V<op> A,B,B
15033 so handle that case specially. */
15034
15035 static void
15036 neon_exchange_operands (void)
15037 {
15038 if (inst.operands[1].present)
15039 {
15040 void *scratch = xmalloc (sizeof (inst.operands[0]));
15041
15042 /* Swap operands[1] and operands[2]. */
15043 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15044 inst.operands[1] = inst.operands[2];
15045 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15046 free (scratch);
15047 }
15048 else
15049 {
15050 inst.operands[1] = inst.operands[2];
15051 inst.operands[2] = inst.operands[0];
15052 }
15053 }
15054
15055 static void
15056 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
15057 {
15058 if (inst.operands[2].isreg)
15059 {
15060 if (invert)
15061 neon_exchange_operands ();
15062 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
15063 }
15064 else
15065 {
15066 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15067 struct neon_type_el et = neon_check_type (2, rs,
15068 N_EQK | N_SIZ, immtypes | N_KEY);
15069
15070 NEON_ENCODE (IMMED, inst);
15071 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15072 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15073 inst.instruction |= LOW4 (inst.operands[1].reg);
15074 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15075 inst.instruction |= neon_quad (rs) << 6;
15076 inst.instruction |= (et.type == NT_float) << 10;
15077 inst.instruction |= neon_logbits (et.size) << 18;
15078
15079 neon_dp_fixup (&inst);
15080 }
15081 }
15082
/* Comparison in natural operand order (no exchange).  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15088
/* Comparison with operands exchanged (see neon_exchange_operands).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15094
/* Equality comparison: integer/float types up to 32 bits, no exchange.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15100
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in bits 2:0, index in bit 3 (and M).  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in bits 3:0, index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15139
15140 /* Encode multiply / multiply-accumulate scalar instructions. */
15141
15142 static void
15143 neon_mul_mac (struct neon_type_el et, int ubit)
15144 {
15145 unsigned scalar;
15146
15147 /* Give a more helpful error message if we have an invalid type. */
15148 if (et.type == NT_invtype)
15149 return;
15150
15151 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
15152 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15153 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15154 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15155 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15156 inst.instruction |= LOW4 (scalar);
15157 inst.instruction |= HI1 (scalar) << 5;
15158 inst.instruction |= (et.type == NT_float) << 8;
15159 inst.instruction |= neon_logbits (et.size) << 20;
15160 inst.instruction |= (ubit != 0) << 24;
15161
15162 neon_dp_fixup (&inst);
15163 }
15164
15165 static void
15166 do_neon_mac_maybe_scalar (void)
15167 {
15168 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
15169 return;
15170
15171 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15172 return;
15173
15174 if (inst.operands[2].isscalar)
15175 {
15176 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15177 struct neon_type_el et = neon_check_type (3, rs,
15178 N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
15179 NEON_ENCODE (SCALAR, inst);
15180 neon_mul_mac (et, neon_quad (rs));
15181 }
15182 else
15183 {
15184 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15185 affected if we specify unsigned args. */
15186 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15187 }
15188 }
15189
/* Encode fused multiply-accumulate (see do_vfp_nsyn_fma_fms).  The VFP
   form is tried first; otherwise the Neon three-same form is used.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15201
15202 static void
15203 do_neon_tst (void)
15204 {
15205 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15206 struct neon_type_el et = neon_check_type (3, rs,
15207 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15208 neon_three_same (neon_quad (rs), 0, et.size);
15209 }
15210
15211 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15212 same types as the MAC equivalents. The polynomial type for this instruction
15213 is encoded the same as the integer type. */
15214
15215 static void
15216 do_neon_mul (void)
15217 {
15218 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15219 return;
15220
15221 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15222 return;
15223
15224 if (inst.operands[2].isscalar)
15225 do_neon_mac_maybe_scalar ();
15226 else
15227 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15228 }
15229
15230 static void
15231 do_neon_qdmulh (void)
15232 {
15233 if (inst.operands[2].isscalar)
15234 {
15235 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15236 struct neon_type_el et = neon_check_type (3, rs,
15237 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15238 NEON_ENCODE (SCALAR, inst);
15239 neon_mul_mac (et, neon_quad (rs));
15240 }
15241 else
15242 {
15243 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15244 struct neon_type_el et = neon_check_type (3, rs,
15245 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15246 NEON_ENCODE (INTEGER, inst);
15247 /* The U bit (rounding) comes from bit mask. */
15248 neon_three_same (neon_quad (rs), 0, et.size);
15249 }
15250 }
15251
15252 static void
15253 do_neon_qrdmlah (void)
15254 {
15255 /* Check we're on the correct architecture. */
15256 if (!mark_feature_used (&fpu_neon_ext_armv8))
15257 inst.error =
15258 _("instruction form not available on this architecture.");
15259 else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15260 {
15261 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15262 record_feature_use (&fpu_neon_ext_v8_1);
15263 }
15264
15265 if (inst.operands[2].isscalar)
15266 {
15267 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15268 struct neon_type_el et = neon_check_type (3, rs,
15269 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15270 NEON_ENCODE (SCALAR, inst);
15271 neon_mul_mac (et, neon_quad (rs));
15272 }
15273 else
15274 {
15275 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15276 struct neon_type_el et = neon_check_type (3, rs,
15277 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15278 NEON_ENCODE (INTEGER, inst);
15279 /* The U bit (rounding) comes from bit mask. */
15280 neon_three_same (neon_quad (rs), 0, et.size);
15281 }
15282 }
15283
15284 static void
15285 do_neon_fcmp_absolute (void)
15286 {
15287 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15288 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15289 N_F_16_32 | N_KEY);
15290 /* Size field comes from bit mask. */
15291 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15292 }
15293
/* Absolute float comparison with operands exchanged first.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15300
15301 static void
15302 do_neon_step (void)
15303 {
15304 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15305 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15306 N_F_16_32 | N_KEY);
15307 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15308 }
15309
15310 static void
15311 do_neon_abs_neg (void)
15312 {
15313 enum neon_shape rs;
15314 struct neon_type_el et;
15315
15316 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15317 return;
15318
15319 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15320 return;
15321
15322 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15323 et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15324
15325 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15326 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15327 inst.instruction |= LOW4 (inst.operands[1].reg);
15328 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15329 inst.instruction |= neon_quad (rs) << 6;
15330 inst.instruction |= (et.type == NT_float) << 10;
15331 inst.instruction |= neon_logbits (et.size) << 18;
15332
15333 neon_dp_fixup (&inst);
15334 }
15335
15336 static void
15337 do_neon_sli (void)
15338 {
15339 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15340 struct neon_type_el et = neon_check_type (2, rs,
15341 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15342 int imm = inst.operands[2].imm;
15343 constraint (imm < 0 || (unsigned)imm >= et.size,
15344 _("immediate out of range for insert"));
15345 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15346 }
15347
15348 static void
15349 do_neon_sri (void)
15350 {
15351 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15352 struct neon_type_el et = neon_check_type (2, rs,
15353 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15354 int imm = inst.operands[2].imm;
15355 constraint (imm < 1 || (unsigned)imm > et.size,
15356 _("immediate out of range for insert"));
15357 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15358 }
15359
15360 static void
15361 do_neon_qshlu_imm (void)
15362 {
15363 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15364 struct neon_type_el et = neon_check_type (2, rs,
15365 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15366 int imm = inst.operands[2].imm;
15367 constraint (imm < 0 || (unsigned)imm >= et.size,
15368 _("immediate out of range for shift"));
15369 /* Only encodes the 'U present' variant of the instruction.
15370 In this case, signed types have OP (bit 8) set to 0.
15371 Unsigned types have OP set to 1. */
15372 inst.instruction |= (et.type == NT_unsigned) << 8;
15373 /* The rest of the bits are the same as other immediate shifts. */
15374 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15375 }
15376
15377 static void
15378 do_neon_qmovn (void)
15379 {
15380 struct neon_type_el et = neon_check_type (2, NS_DQ,
15381 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15382 /* Saturating move where operands can be signed or unsigned, and the
15383 destination has the same signedness. */
15384 NEON_ENCODE (INTEGER, inst);
15385 if (et.type == NT_unsigned)
15386 inst.instruction |= 0xc0;
15387 else
15388 inst.instruction |= 0x80;
15389 neon_two_same (0, 1, et.size / 2);
15390 }
15391
15392 static void
15393 do_neon_qmovun (void)
15394 {
15395 struct neon_type_el et = neon_check_type (2, NS_DQ,
15396 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15397 /* Saturating move with unsigned results. Operands must be signed. */
15398 NEON_ENCODE (INTEGER, inst);
15399 neon_two_same (0, 1, et.size / 2);
15400 }
15401
15402 static void
15403 do_neon_rshift_sat_narrow (void)
15404 {
15405 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15406 or unsigned. If operands are unsigned, results must also be unsigned. */
15407 struct neon_type_el et = neon_check_type (2, NS_DQI,
15408 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15409 int imm = inst.operands[2].imm;
15410 /* This gets the bounds check, size encoding and immediate bits calculation
15411 right. */
15412 et.size /= 2;
15413
15414 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15415 VQMOVN.I<size> <Dd>, <Qm>. */
15416 if (imm == 0)
15417 {
15418 inst.operands[2].present = 0;
15419 inst.instruction = N_MNEM_vqmovn;
15420 do_neon_qmovn ();
15421 return;
15422 }
15423
15424 constraint (imm < 1 || (unsigned)imm > et.size,
15425 _("immediate out of range"));
15426 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15427 }
15428
15429 static void
15430 do_neon_rshift_sat_narrow_u (void)
15431 {
15432 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15433 or unsigned. If operands are unsigned, results must also be unsigned. */
15434 struct neon_type_el et = neon_check_type (2, NS_DQI,
15435 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15436 int imm = inst.operands[2].imm;
15437 /* This gets the bounds check, size encoding and immediate bits calculation
15438 right. */
15439 et.size /= 2;
15440
15441 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15442 VQMOVUN.I<size> <Dd>, <Qm>. */
15443 if (imm == 0)
15444 {
15445 inst.operands[2].present = 0;
15446 inst.instruction = N_MNEM_vqmovun;
15447 do_neon_qmovun ();
15448 return;
15449 }
15450
15451 constraint (imm < 1 || (unsigned)imm > et.size,
15452 _("immediate out of range"));
15453 /* FIXME: The manual is kind of unclear about what value U should have in
15454 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15455 must be 1. */
15456 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15457 }
15458
15459 static void
15460 do_neon_movn (void)
15461 {
15462 struct neon_type_el et = neon_check_type (2, NS_DQ,
15463 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15464 NEON_ENCODE (INTEGER, inst);
15465 neon_two_same (0, 1, et.size / 2);
15466 }
15467
15468 static void
15469 do_neon_rshift_narrow (void)
15470 {
15471 struct neon_type_el et = neon_check_type (2, NS_DQI,
15472 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15473 int imm = inst.operands[2].imm;
15474 /* This gets the bounds check, size encoding and immediate bits calculation
15475 right. */
15476 et.size /= 2;
15477
15478 /* If immediate is zero then we are a pseudo-instruction for
15479 VMOVN.I<size> <Dd>, <Qm> */
15480 if (imm == 0)
15481 {
15482 inst.operands[2].present = 0;
15483 inst.instruction = N_MNEM_vmovn;
15484 do_neon_movn ();
15485 return;
15486 }
15487
15488 constraint (imm < 1 || (unsigned)imm > et.size,
15489 _("immediate out of range for narrowing operation"));
15490 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15491 }
15492
15493 static void
15494 do_neon_shll (void)
15495 {
15496 /* FIXME: Type checking when lengthening. */
15497 struct neon_type_el et = neon_check_type (2, NS_QDI,
15498 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15499 unsigned imm = inst.operands[2].imm;
15500
15501 if (imm == et.size)
15502 {
15503 /* Maximum shift variant. */
15504 NEON_ENCODE (INTEGER, inst);
15505 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15506 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15507 inst.instruction |= LOW4 (inst.operands[1].reg);
15508 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15509 inst.instruction |= neon_logbits (et.size) << 18;
15510
15511 neon_dp_fixup (&inst);
15512 }
15513 else
15514 {
15515 /* A more-specific type check for non-max versions. */
15516 et = neon_check_type (2, NS_QDI,
15517 N_EQK | N_DBL, N_SU_32 | N_KEY);
15518 NEON_ENCODE (IMMED, inst);
15519 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15520 }
15521 }
15522
/* Check the various types for the VCVT instruction, and return which version
  the current instruction is.  */

/* X-macro table of all VCVT conversion flavours.  Each CVT_VAR row is
   (name, dest-type bits, source-type bits, extra type-check bits,
   bitshift nsyn opcode, plain nsyn opcode, round-to-zero nsyn opcode).
   Flavour names read <destination>_<source>.  The rows that use
   `whole_reg' and `key' rely on same-named locals being in scope at the
   point of expansion (see get_neon_cvt_flavour below).  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion: generate one enumerator per table row.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  The enumerator order
   matches the table order above, so the enum values are also used as
   indices into the per-flavour opcode-name arrays built from the same
   table (see do_vfp_nsyn_cvt and friends).  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Everything from f32_f64 onwards is handled by the VFP encoding
     paths rather than the Neon ones.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15574
/* Determine which conversion flavour the current instruction is, for the
   shape RS, by probing every CVT_FLAVOUR_VAR row in table order with
   neon_check_type and returning on the first row that matches.  Clears
   inst.error on success (failed probes may have set it); returns
   neon_cvt_flavour_invalid if no row matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Second expansion of the table: one type-check attempt per row.
     Note this references the locals `et', `whole_reg' and `key' declared
     below the #define but above the expansion point.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15600
/* The rounding behaviour selected by the VCVT mnemonic variant being
   assembled (see the do_neon_cvt* wrappers below for the mapping).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA: round to nearest, ties away from zero.  */
  neon_cvt_mode_n,	/* VCVTN: round to nearest, ties to even.  */
  neon_cvt_mode_p,	/* VCVTP: round towards plus infinity.  */
  neon_cvt_mode_m,	/* VCVTM: round towards minus infinity.  */
  neon_cvt_mode_z,	/* VCVT: round towards zero.  */
  neon_cvt_mode_x,	/* VCVTR: use the rounding mode from the FPSCR.  */
  neon_cvt_mode_r	/* NOTE(review): not used in this part of the file;
			   presumably another FPSCR-rounding variant --
			   confirm against the remaining callers.  */
};
15611
/* Neon-syntax VFP conversions.  */

/* Encode a VFP conversion written in Neon syntax, by translating the
   (shape, flavour) pair into the corresponding legacy VFP mnemonic and
   dispatching through do_vfp_nsyn_opcode.  Shapes with a trailing
   immediate select the fixed-point (bitshift) opcode table; all others
   use the plain conversion table.  If the flavour has no opcode in the
   chosen table (NULL entry), nothing is emitted here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
	  /* Third expansion of the flavour table: bitshift opcode names,
	     indexed by enum neon_cvt_flavour.  */
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms are destructive: Vd is both source and
	     destination, and the shift immediate moves into operand 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
	  /* Plain conversion opcode names, indexed the same way.  */
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15665
/* Encode a round-towards-zero VFP conversion written in Neon syntax,
   using the per-flavour round-to-zero opcode table (the ZN column of
   CVT_FLAVOUR_VAR).  Flavours with a NULL entry are silently skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
      /* Round-to-zero opcode names, indexed by enum neon_cvt_flavour.  */
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15682
/* Encode an FP v8 VCVT{A,N,P,M} float-to-integer conversion for flavour
   FLAVOUR with rounding mode MODE.  Sets the sz (double-precision), op
   (signedness), and rm (rounding-mode) fields, hardwires the 0xf
   unconditional prefix, and rejects flavours/modes the FPv8 form cannot
   express.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  /* These encodings are unconditional and must not appear in an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz selects a double-precision source; op selects a signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  /* The source register is a D register only for the f64 flavours.  */
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional (0xf) condition field.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15758
/* Common worker for all the VCVT* mnemonic handlers below.  Classifies the
   operand shape and conversion flavour, then dispatches to one of:
   the legacy VFP round-to-zero path, the ARMv8.2 fp16 path, the generic
   VFP paths, or the Neon encodings (fixed-point, rounding-mode,
   integer, and half<->single narrow/widen forms).  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion (shift immediate present).  */
      {
	unsigned immbits;
	/* Per-flavour opcode bits, indexed by enum neon_cvt_flavour
	   (only the first eight, non-VFP flavours can reach here).  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	/* Flavours below s16_f16 are the 32-bit ones (enum order);
	   the rest are half-precision.  */
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this
	       OR is redundant (harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M}: unconditional, carries the
	     rounding mode in bits [9:8].  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Plain Neon integer<->float conversion; also reached by goto
	     from the fixed-point case when the shift immediate is #0.  */
    int_encode:
	  {
	    /* Per-flavour opcode bits, indexed as for enctab above.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F16.F32 (narrow) vs VCVT.F32.F16 (widen).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15950
static void
do_neon_cvtr (void)
{
  /* VCVTR: convert using the rounding mode held in the FPSCR.  */
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15956
static void
do_neon_cvt (void)
{
  /* VCVT: default conversion, round towards zero.  */
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15962
static void
do_neon_cvta (void)
{
  /* VCVTA: round to nearest, ties away from zero.  */
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15968
static void
do_neon_cvtn (void)
{
  /* VCVTN: round to nearest, ties to even.  */
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15974
static void
do_neon_cvtp (void)
{
  /* VCVTP: round towards plus infinity.  */
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15980
static void
do_neon_cvtm (void)
{
  /* VCVTM: round towards minus infinity.  */
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15986
15987 static void
15988 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15989 {
15990 if (is_double)
15991 mark_feature_used (&fpu_vfp_ext_armv8);
15992
15993 encode_arm_vfp_reg (inst.operands[0].reg,
15994 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15995 encode_arm_vfp_reg (inst.operands[1].reg,
15996 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15997 inst.instruction |= to ? 0x10000 : 0;
15998 inst.instruction |= t ? 0x80 : 0;
15999 inst.instruction |= is_double ? 0x100 : 0;
16000 do_vfp_cond_or_thumb ();
16001 }
16002
/* Parse-and-dispatch worker for VCVTB/VCVTT (T selects VCVTT).  Probes the
   four supported type combinations in turn with neon_check_type; each failed
   probe may set inst.error, which is cleared once a match is found.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No supported type combination matched; inst.error is left as set by
       the last failed neon_check_type probe.  */
    return;
}
16044
static void
do_neon_cvtb (void)
{
  /* VCVTB: convert using the bottom half of the half-precision register.  */
  do_neon_cvttb_1 (FALSE);
}
16050
16051
static void
do_neon_cvtt (void)
{
  /* VCVTT: convert using the top half of the half-precision register.  */
  do_neon_cvttb_1 (TRUE);
}
16057
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding for
   the (possibly 64-bit) immediate; if none exists, inverts the immediate
   and flips VMOV<->VMVN, then tries once more before giving up.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split: low 32 bits in .imm, high 32 bits
     in .reg (flagged by regisimm).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit, which the retry above may have flipped.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16109
16110 static void
16111 do_neon_mvn (void)
16112 {
16113 if (inst.operands[1].isreg)
16114 {
16115 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16116
16117 NEON_ENCODE (INTEGER, inst);
16118 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16119 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16120 inst.instruction |= LOW4 (inst.operands[1].reg);
16121 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16122 inst.instruction |= neon_quad (rs) << 6;
16123 }
16124 else
16125 {
16126 NEON_ENCODE (IMMED, inst);
16127 neon_move_immediate ();
16128 }
16129
16130 neon_dp_fixup (&inst);
16131 }
16132
16133 /* Encode instructions of form:
16134
16135 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16136 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16137
16138 static void
16139 neon_mixed_length (struct neon_type_el et, unsigned size)
16140 {
16141 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16142 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16143 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16144 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16145 inst.instruction |= LOW4 (inst.operands[2].reg);
16146 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16147 inst.instruction |= (et.type == NT_unsigned) << 24;
16148 inst.instruction |= neon_logbits (size) << 20;
16149
16150 neon_dp_fixup (&inst);
16151 }
16152
/* Long three-register operations (Qd, Dn, Dm) on 8/16/32-bit
   signed or unsigned elements.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16161
/* VABAL: absolute-difference-and-accumulate long (Qd, Dn, Dm).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16169
/* Encode a long multiply(-accumulate) whose third operand is either a
   scalar (Dm[x]) or an ordinary D register.  NOTE(review): the parameter
   names appear transposed relative to their use -- REGTYPES constrains
   the *scalar* variant below and SCALARTYPES the *register* variant;
   confirm against callers before relying on the names.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar variant: Qd, Dn, Dm[x].  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Register variant: Qd, Dn, Dm.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16188
/* Long multiply-accumulate, optionally with a scalar third operand
   (16/32-bit signed or unsigned element types).  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16194
16195 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16196 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16197
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  /* Translate GAS's internal SCALAR representation into the Rm field
     encoding for vfmal/vfmsl.  The Q form (QUAD_P) packs a 3-bit register
     and 2-bit index; the D form packs a 4-bit register (split across bit 5
     and bits [2:0]) and a 1-bit index.  Out-of-range scalars report an
     error and encode as 0.  */
  unsigned rm = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (rm <= 7 && idx <= 3)
	return ((rm & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));
    }
  else
    {
      if (rm <= 15 && idx <= 1)
	return (((rm & 0x1) << 5)
		| ((rm >> 1) & 0x7)
		| ((idx & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16227
/* Encode vfmal (SUBTYPE == 0) or vfmsl (SUBTYPE == 1): ARMv8.2 FP16
   fused multiply-accumulate long, in either three-same D/Q form or with
   a scalar-indexed third operand.  Encodes via neon_three_same and then
   patches the fields that differ from the standard three-same layout.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size"
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* The encoding is unconditional; warn if it appears inside an IT block.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

  /* Helper field extractors for the D-form split register encoding.
     NOTE(review): defined mid-function and never #undef'd, so they remain
     visible for the rest of the file.  */
#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16309
static void
do_neon_vfmal (void)
{
  /* VFMAL: FP16 fused multiply-add long.  */
  return do_neon_fmac_maybe_scalar_long (0);
}
16315
static void
do_neon_vfmsl (void)
{
  /* VFMSL: FP16 fused multiply-subtract long.  */
  return do_neon_fmac_maybe_scalar_long (1);
}
16321
/* Wide three-register operations (Qd, Qn, Dm).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16329
/* Narrowing three-register operations (Qd, Dn, Dm per NS_QDD shape,
   16/32/64-bit integer elements).  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* The element size in the encoding is that of the narrow result.  */
  neon_mixed_length (et, et.size / 2);
}
16340
/* Saturating long multiply, optionally with a scalar operand; only
   signed 16/32-bit element types are valid.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16346
/* VMULL: long multiply.  The scalar form goes through the generic
   long-MAC path; the register form additionally accepts polynomial
   types (P8, and P64 on targets with the crypto extension).  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the size field to 0b10 (neon_logbits (32) == 2).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16378
/* VEXT: extract a contiguous run of bytes from a pair of registers.
   The user-supplied index is in elements; it is converted to the byte
   index the encoding requires.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte index must lie within one register (8 bytes for D,
     16 for Q).  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16400
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.
   Which variant is being assembled is recovered from opcode bits [8:7].  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16417
/* VDUP: duplicate either a scalar (Dm[x]) or an ARM core register into
   every lane of a D or Q vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd|Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index and size share the imm4 field: the index sits above
	 the size marker bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source: VDUP.<size> <Dd|Qd>, <Rm>.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* The element size is spread across the b (bit 22) and e (bit 5)
	 fields.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16468
16469 /* VMOV has particularly many variations. It can be one of:
16470 0. VMOV<c><q> <Qd>, <Qm>
16471 1. VMOV<c><q> <Dd>, <Dm>
16472 (Register operations, which are VORR with Rm = Rn.)
16473 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16474 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16475 (Immediate loads.)
16476 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16477 (ARM register to scalar.)
16478 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16479 (Two ARM registers to vector.)
16480 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16481 (Scalar to ARM register.)
16482 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16483 (Vector to two ARM registers.)
16484 8. VMOV.F32 <Sd>, <Sm>
16485 9. VMOV.F64 <Dd>, <Dm>
16486 (VFP register moves.)
16487 10. VMOV.F32 <Sd>, #imm
16488 11. VMOV.F64 <Dd>, #imm
16489 (VFP float immediate load.)
16490 12. VMOV <Rd>, <Sm>
16491 (VFP single to ARM reg.)
16492 13. VMOV <Sd>, <Rm>
16493 (ARM reg to VFP single.)
16494 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16495 (Two ARM regs to two VFP singles.)
16496 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16497 (Two VFP singles to two ARM regs.)
16498
16499 These cases can be disambiguated using neon_select_shape, except cases 1/9
16500 and 3/11 which depend on the operand type too.
16501
16502 All the encoded bits are hardcoded by this function.
16503
16504 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16505 Cases 5, 7 may be used with VFPv2 and above.
16506
16507 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16508 can specify a type where it doesn't make sense to, and is ignored). */
16509
/* Worker for the many forms of VMOV enumerated in the comment above.
   Dispatches on the operand shape chosen by neon_select_shape and either
   delegates to a VFP nsyn opcode or hand-assembles the Neon encoding.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: F64 register move goes through the VFP opcode.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR Dd, Dm, Dm: the source register is encoded in both the
	   Vm and Vn fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only the 32-bit transfer is available without Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* The low two size/index bits land in bits [6:5], the rest in
	   bits [22:21].  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only the 32-bit transfer is available without Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Signedness affects the encoding for the sub-word transfers.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	/* The low two size/index bits land in bits [6:5], the rest in
	   bits [22:21].  */
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16777
16778 static void
16779 do_neon_rshift_round_imm (void)
16780 {
16781 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16782 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16783 int imm = inst.operands[2].imm;
16784
16785 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16786 if (imm == 0)
16787 {
16788 inst.operands[2].present = 0;
16789 do_neon_mov ();
16790 return;
16791 }
16792
16793 constraint (imm < 1 || (unsigned)imm > et.size,
16794 _("immediate out of range for shift"));
16795 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16796 et.size - imm);
16797 }
16798
16799 static void
16800 do_neon_movhf (void)
16801 {
16802 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16803 constraint (rs != NS_HH, _("invalid suffix"));
16804
16805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16806 _(BAD_FPU));
16807
16808 if (inst.cond != COND_ALWAYS)
16809 {
16810 if (thumb_mode)
16811 {
16812 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
16813 " the behaviour is UNPREDICTABLE"));
16814 }
16815 else
16816 {
16817 inst.error = BAD_COND;
16818 return;
16819 }
16820 }
16821
16822 do_vfp_sp_monadic ();
16823
16824 inst.is_neon = 1;
16825 inst.instruction |= 0xf0000000;
16826 }
16827
16828 static void
16829 do_neon_movl (void)
16830 {
16831 struct neon_type_el et = neon_check_type (2, NS_QD,
16832 N_EQK | N_DBL, N_SU_32 | N_KEY);
16833 unsigned sizebits = et.size >> 3;
16834 inst.instruction |= sizebits << 19;
16835 neon_two_same (0, et.type == NT_unsigned, -1);
16836 }
16837
16838 static void
16839 do_neon_trn (void)
16840 {
16841 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16842 struct neon_type_el et = neon_check_type (2, rs,
16843 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16844 NEON_ENCODE (INTEGER, inst);
16845 neon_two_same (neon_quad (rs), 1, et.size);
16846 }
16847
16848 static void
16849 do_neon_zip_uzp (void)
16850 {
16851 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16852 struct neon_type_el et = neon_check_type (2, rs,
16853 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16854 if (rs == NS_DD && et.size == 32)
16855 {
16856 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16857 inst.instruction = N_MNEM_vtrn;
16858 do_neon_trn ();
16859 return;
16860 }
16861 neon_two_same (neon_quad (rs), 1, et.size);
16862 }
16863
16864 static void
16865 do_neon_sat_abs_neg (void)
16866 {
16867 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16868 struct neon_type_el et = neon_check_type (2, rs,
16869 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16870 neon_two_same (neon_quad (rs), 1, et.size);
16871 }
16872
16873 static void
16874 do_neon_pair_long (void)
16875 {
16876 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16877 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16878 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16879 inst.instruction |= (et.type == NT_unsigned) << 7;
16880 neon_two_same (neon_quad (rs), 1, et.size);
16881 }
16882
16883 static void
16884 do_neon_recip_est (void)
16885 {
16886 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16887 struct neon_type_el et = neon_check_type (2, rs,
16888 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16889 inst.instruction |= (et.type == NT_float) << 8;
16890 neon_two_same (neon_quad (rs), 1, et.size);
16891 }
16892
16893 static void
16894 do_neon_cls (void)
16895 {
16896 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16897 struct neon_type_el et = neon_check_type (2, rs,
16898 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16899 neon_two_same (neon_quad (rs), 1, et.size);
16900 }
16901
16902 static void
16903 do_neon_clz (void)
16904 {
16905 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16906 struct neon_type_el et = neon_check_type (2, rs,
16907 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16908 neon_two_same (neon_quad (rs), 1, et.size);
16909 }
16910
16911 static void
16912 do_neon_cnt (void)
16913 {
16914 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16915 struct neon_type_el et = neon_check_type (2, rs,
16916 N_EQK | N_INT, N_8 | N_KEY);
16917 neon_two_same (neon_quad (rs), 1, et.size);
16918 }
16919
16920 static void
16921 do_neon_swp (void)
16922 {
16923 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16924 neon_two_same (neon_quad (rs), 1, -1);
16925 }
16926
16927 static void
16928 do_neon_tbl_tbx (void)
16929 {
16930 unsigned listlenbits;
16931 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16932
16933 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16934 {
16935 first_error (_("bad list length for table lookup"));
16936 return;
16937 }
16938
16939 listlenbits = inst.operands[1].imm - 1;
16940 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16941 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16942 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16943 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16944 inst.instruction |= LOW4 (inst.operands[2].reg);
16945 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16946 inst.instruction |= listlenbits << 8;
16947
16948 neon_dp_fixup (&inst);
16949 }
16950
/* Encode VLDM/VSTM (and the DB variants) for lists of D registers; lists
   of S registers are handed off to the VFP nsyn encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer-length field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16980
/* Encode VLDR/VSTR of a single VFP register (single or double precision,
   plus the ARMv8.2 half-precision forms).  */
static void
do_neon_ldr_str (void)
{
  /* The L bit (bit 20) distinguishes a load from a store.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
17017
17018 /* "interleave" version also handles non-interleaving register VLD1/VST1
17019 instructions. */
17020
/* Encode the multiple-structure (interleaving) forms of VLD<n>/VST<n>,
   including non-interleaving VLD1/VST1.  Validates the :64/:128/:256
   alignment qualifier against the list length and fills in the "type"
   field from a lookup table.  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate the alignment qualifier (stored as bits above 8 of the
     immediate) into the two-bit align field, checking that the list
     length supports it.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17086
17087 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17088 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17089 otherwise. The variable arguments are a list of pairs of legal (size, align)
17090 values, terminated with -1. */
17091
17092 static int
17093 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17094 {
17095 va_list ap;
17096 int result = FAIL, thissize, thisalign;
17097
17098 if (!inst.operands[1].immisalign)
17099 {
17100 *do_alignment = 0;
17101 return SUCCESS;
17102 }
17103
17104 va_start (ap, do_alignment);
17105
17106 do
17107 {
17108 thissize = va_arg (ap, int);
17109 if (thissize == -1)
17110 break;
17111 thisalign = va_arg (ap, int);
17112
17113 if (size == thissize && align == thisalign)
17114 result = SUCCESS;
17115 }
17116 while (result != SUCCESS);
17117
17118 va_end (ap);
17119
17120 if (result == SUCCESS)
17121 *do_alignment = 1;
17122 else
17123 first_error (_("unsupported alignment for instruction"));
17124
17125 return result;
17126 }
17127
/* Encode single n-element-structure-to-one-lane VLD<n>/VST<n>; <n> minus
   one comes from bits [9:8] of the initial opcode bitmask.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment qualifier lives above bit 8 of the address immediate.  */
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a 64-bit register of this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, align) pairs and its own
     align-field encoding.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17212
17213 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17214
/* Encode VLD<n>-to-all-lanes; <n> minus one comes from bits [9:8] of the
   initial opcode bitmask.  */
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 is set for the two-register form of the list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 is set for a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with :128 alignment use the special size
	   encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
17287
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17290
17291 static void
17292 do_neon_ldx_stx (void)
17293 {
17294 if (inst.operands[1].isreg)
17295 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17296
17297 switch (NEON_LANE (inst.operands[0].imm))
17298 {
17299 case NEON_INTERLEAVE_LANES:
17300 NEON_ENCODE (INTERLV, inst);
17301 do_neon_ld_st_interleave ();
17302 break;
17303
17304 case NEON_ALL_LANES:
17305 NEON_ENCODE (DUP, inst);
17306 if (inst.instruction == N_INV)
17307 {
17308 first_error ("only loads support such operands");
17309 break;
17310 }
17311 do_neon_ld_dup ();
17312 break;
17313
17314 default:
17315 NEON_ENCODE (LANE, inst);
17316 do_neon_ld_st_lane ();
17317 }
17318
17319 /* L bit comes from bit mask. */
17320 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17321 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17322 inst.instruction |= inst.operands[1].reg << 16;
17323
17324 if (inst.operands[1].postind)
17325 {
17326 int postreg = inst.operands[1].imm & 0xf;
17327 constraint (!inst.operands[1].immisreg,
17328 _("post-index must be a register"));
17329 constraint (postreg == 0xd || postreg == 0xf,
17330 _("bad register for post-index"));
17331 inst.instruction |= postreg;
17332 }
17333 else
17334 {
17335 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17336 constraint (inst.reloc.exp.X_op != O_constant
17337 || inst.reloc.exp.X_add_number != 0,
17338 BAD_ADDR_MODE);
17339
17340 if (inst.operands[1].writeback)
17341 {
17342 inst.instruction |= 0xd;
17343 }
17344 else
17345 inst.instruction |= 0xf;
17346 }
17347
17348 if (thumb_mode)
17349 inst.instruction |= 0xf9000000;
17350 else
17351 inst.instruction |= 0xf4000000;
17352 }
17353
17354 /* FP v8. */
/* Encode a three-operand FP v8 VFP instruction for operand shape RS,
   using the single-precision encoder for FFF/HHH shapes and the
   double-precision encoder otherwise.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 is set for the double-precision (DDD) shape.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
17382
17383 static void
17384 do_vsel (void)
17385 {
17386 set_it_insn_type (OUTSIDE_IT_INSN);
17387
17388 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17389 first_error (_("invalid instruction shape"));
17390 }
17391
17392 static void
17393 do_vmaxnm (void)
17394 {
17395 set_it_insn_type (OUTSIDE_IT_INSN);
17396
17397 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17398 return;
17399
17400 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17401 return;
17402
17403 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17404 }
17405
/* Common worker for the VRINT family; MODE selects the rounding mode.
   Uses the scalar VFP encoding when a VFP-compatible type is given,
   otherwise falls back to the vector Neon encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Rounding-mode-specific opcode bits for the VFP encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects the double-precision form.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding-mode-specific opcode bits for the Neon encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17497
/* VRINTX: round to integral, raising Inexact if the result differs.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
17503
/* VRINTZ: round to integral, towards zero.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
17509
/* VRINTR: round to integral using the FPSCR rounding mode.  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
17515
/* VRINTA: round to integral, to nearest with ties away from zero.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
17521
/* VRINTN: round to integral, to nearest with ties to even.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
17527
/* VRINTP: round to integral, towards plus infinity.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
17533
/* VRINTM: round to integral, towards minus infinity.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17539
/* Validate and encode the scalar operand OPND of VCMLA for element size
   ELSIZE.  For 16-bit elements the register must be D0-D15 and the index
   0 or 1, encoded as the register number with the index in bit 4; for
   32-bit elements only index 0 is allowed and the register number is
   returned unchanged.  Reports an error and returns 0 otherwise.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 32)
    {
      if (idx == 0)
	return reg;
    }
  else if (elsize == 16 && idx < 2 && reg < 16)
    return reg | (idx << 4);

  first_error (_("scalar out of range"));
  return 0;
}
17554
/* Encode VCMLA (complex multiply-accumulate with rotation); the rotation
   (0/90/180/270 degrees) is encoded as rot/90 in the rot field.  Handles
   both the by-scalar and the three-register vector forms.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Rotation is encoded as a two-bit multiple of 90 degrees.  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17595
/* Encode VCADD - vector complex add with rotation (Armv8.3-A).  Only
   rotations of 90 and 270 degrees exist; the choice is encoded in
   bit 24.  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17613
/* Dot Product instructions encoding support.  */

/* Encode a Dot Product instruction (VSDOT/VUDOT).  UNSIGNED_P selects
   the unsigned variant, encoded in the 'U' bit (bit 4).  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the
     third operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field is fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operands[2].reg in neon_three_same is GAS's internal encoding,
     not the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17670
/* Dot Product instructions for signed integer (VSDOT).  */

static void
do_neon_dotproduct_s (void)
{
  /* ISO C forbids `return <expr>;' in a function returning void
     (C11 6.8.6.4p1), so call the worker as a plain statement.  */
  do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer (VUDOT).  */

static void
do_neon_dotproduct_u (void)
{
  do_neon_dotproduct (1);
}
17686
/* Crypto v1 instructions.  */

/* Encode a two-register crypto instruction (AESE/AESD/AESMC/AESIMC,
   SHA1H, SHA1SU1, SHA256SU0).  ELTTYPE is the required element type of
   both operands; OP is placed in bits [7:6] of the encoding, or -1 to
   leave those bits as preset in the opcode template.  */

static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Discard any error noted during operand parsing now that the types
     have checked out.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Select the top byte of the unconditional encoding for the current
     instruction set.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17712
/* Encode a three-register crypto instruction (SHA1C/SHA1P/SHA1M,
   SHA1SU0, SHA256H, SHA256H2, SHA256SU1).  U supplies the 'U' bit of
   the three-same encoding and OP selects the size field (8 << OP).  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Discard any error noted during operand parsing now that the types
     have checked out.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17727
/* AESE - AES single round encryption (two-register, op = 0).  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD - AES single round decryption (two-register, op = 1).  */

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC - AES mix columns (two-register, op = 2).  */

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC - AES inverse mix columns (two-register, op = 3).  */

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C - SHA1 hash update, choose (three-register, u = 0, op = 0).  */

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P - SHA1 hash update, parity (three-register, u = 0, op = 1).  */

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M - SHA1 hash update, majority (three-register, u = 0, op = 2).  */

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0 - SHA1 schedule update 0 (three-register, u = 0, op = 3).  */

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H - SHA256 hash update part 1 (three-register, u = 1, op = 0).  */

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2 - SHA256 hash update part 2 (three-register, u = 1, op = 1).  */

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1 - SHA256 schedule update 1 (three-register, u = 1, op = 2).  */

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H - SHA1 fixed rotate (two-register, op preset in template).  */

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1 - SHA1 schedule update 1 (two-register, op = 0).  */

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0 - SHA256 schedule update 0 (two-register, op = 1).  */

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17811
/* Encode a CRC32 instruction.  POLY selects the CRC32C (Castagnoli)
   variants (bit 9 in ARM encoding, bit 20 in Thumb); SZ is the size
   field (0 = byte, 1 = halfword, 2 = word).  */

static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Field positions differ between the ARM and Thumb encodings.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Use of the PC in any operand position is UNPREDICTABLE.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
17829
/* CRC32B - CRC-32 of a byte.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H - CRC-32 of a halfword.  */

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W - CRC-32 of a word.  */

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB - CRC-32C of a byte.  */

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH - CRC-32C of a halfword.  */

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW - CRC-32C of a word.  */

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17865
/* Encode VJCVT (Armv8.3-A): convert double precision to signed 32-bit
   integer, per the F64 -> S32 type check below.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17875
17876 \f
17877 /* Overall per-instruction processing. */
17878
17879 /* We need to be able to fix up arbitrary expressions in some statements.
17880 This is so that we can handle symbols that are an arbitrary distance from
17881 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17882 which returns part of an address in a form which will be valid for
17883 a data instruction. We do this by pushing the expression into a symbol
17884 in the expr_section, and creating a fix for that. */
17885
/* Create a fix-up for the current instruction.  FRAG and WHERE locate
   the instruction, SIZE is its length in bytes, EXP is the expression
   to resolve, PC_REL is non-zero for PC-relative fix-ups and RELOC is
   the BFD relocation code to apply.  */

static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol with no
	     addend, then fall through to the symbolic cases.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is reduced to a single expression symbol
	 first.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17939
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and an offset for
     frag_var.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Reserve the maximum (32-bit) size but emit the narrow form now;
     relaxation may later grow it.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17971
17972 /* Write a 32-bit thumb instruction to buf. */
17973 static void
17974 put_thumb32_insn (char * buf, unsigned long insn)
17975 {
17976 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17977 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17978 }
17979
/* Commit the assembled instruction in `inst' to the output: report any
   pending error, handle relaxable instructions, write the encoding into
   the current frag, and attach any relocation.  STR is the original
   source line, used only for diagnostics.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Nothing to emit (e.g. a purely directive-like mnemonic).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: emit the two halfwords in memory order.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word ARM encoding: the same word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
18026
/* Emit an IT instruction with condition COND and mask MASK.  If TO is
   NULL, fresh space is carved from the current frag; otherwise the
   instruction at TO is overwritten in place (used to grow a previously
   emitted IT block).  Returns the address that was written.  */

static char *
output_it_inst (int cond, int mask, char * to)
{
  /* Base opcode of the 16-bit IT instruction.  */
  unsigned long instruction = 0xbf00;

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
18048
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a conditional affix may attach to each mnemonic, which
   drives the lookup algorithm in opcode_lookup below.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18082
18083 /* Subroutine of md_assemble, responsible for looking up the primary
18084 opcode from the mnemonic the user wrote. STR points to the
18085 beginning of the mnemonic.
18086
18087 This is not simply a hash table lookup, because of conditional
18088 variants. Most instructions have conditional variants, which are
18089 expressed with a _conditional affix_ to the mnemonic. If we were
18090 to encode each conditional variant as a literal string in the opcode
18091 table, it would have approximately 20,000 entries.
18092
18093 Most mnemonics take this affix as a suffix, and in unified syntax,
18094 'most' is upgraded to 'all'. However, in the divided syntax, some
18095 instructions take the affix as an infix, notably the s-variants of
18096 the arithmetic instructions. Of those instructions, all but six
18097 have the infix appear after the third character of the mnemonic.
18098
18099 Accordingly, the algorithm for looking up primary opcodes given
18100 an identifier is:
18101
18102 1. Look up the identifier in the opcode table.
18103 If we find a match, go to step U.
18104
18105 2. Look up the last two characters of the identifier in the
18106 conditions table. If we find a match, look up the first N-2
18107 characters of the identifier in the opcode table. If we
18108 find a match, go to step CE.
18109
18110 3. Look up the fourth and fifth characters of the identifier in
18111 the conditions table. If we find a match, extract those
18112 characters from the identifier, and look up the remaining
18113 characters in the opcode table. If we find a match, go
18114 to step CM.
18115
18116 4. Fail.
18117
18118 U. Examine the tag field of the opcode structure, in case this is
18119 one of the six instructions with its conditional infix in an
18120 unusual place. If it is, the tag tells us where to find the
18121 infix; look it up in the conditions table and set inst.cond
18122 accordingly. Otherwise, this is an unconditional instruction.
18123 Again set inst.cond accordingly. Return the opcode structure.
18124
18125 CE. Examine the tag field to make sure this is an instruction that
18126 should receive a conditional suffix. If it is not, fail.
18127 Otherwise, set inst.cond from the suffix we already looked up,
18128 and return the opcode structure.
18129
18130 CM. Examine the tag field to make sure this is an instruction that
18131 should receive a conditional infix after the third character.
18132 If it is not, fail. Otherwise, undo the edits to the current
18133 line of input and proceed as for case CE. */
18134
/* Look up the opcode for the mnemonic at *STR, handling conditional
   suffixes/infixes and width/Neon-type suffixes; see the algorithm
   description above.  On success, *STR is advanced past the mnemonic
   and inst.cond is set.  Returns NULL on failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (the suffix itself is two characters).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters from the mnemonic and
     look up the remainder; the edit is undone immediately afterwards.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18291
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions.  Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a block of length one; grown by now_it_add_mask.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT was emitted so its mask can be patched later.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18312
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18322
/* Update the mask of the current automatically-generated IT
   instruction, recording the then/else polarity of the new insn (COND's
   low bit) and moving the end-of-block marker bit, then rewrite the
   already-emitted IT instruction in place.  See comments in
   new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this insn's condition polarity...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* ...and re-mark the position of the end-of-block bit.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
18346
18347 /* The IT blocks handling machinery is accessed through the these functions:
18348 it_fsm_pre_encode () from md_assemble ()
18349 set_it_insn_type () optional, from the tencode functions
18350 set_it_insn_type_last () ditto
18351 in_it_block () ditto
18352 it_fsm_post_encode () from md_assemble ()
18353 force_automatic_it_block_close () from label handling functions
18354
18355 Rationale:
18356 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18357 initializing the IT insn type with a generic initial value depending
18358 on the inst.condition.
18359 2) During the tencode function, two things may happen:
18360 a) The tencode function overrides the IT insn type by
18361 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18362 b) The tencode function queries the IT block state by
18363 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18364
18365 Both set_it_insn_type and in_it_block run the internal FSM state
18366 handling function (handle_it_state), because: a) setting the IT insn
18367 type may incur in an invalid state (exiting the function),
18368 and b) querying the state requires the FSM to be updated.
18369 Specifically we want to avoid creating an IT block for conditional
18370 branches, so it_fsm_pre_encode is actually a guess and we can't
18371 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18373 Because of this, if set_it_insn_type and in_it_block have to be used,
18374 set_it_insn_type has to be called first.
18375
18376 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18377 determines the insn IT type depending on the inst.cond code.
18378 When a tencode () routine encodes an instruction that can be
18379 either outside an IT block, or, in the case of being inside, has to be
18380 the last one, set_it_insn_type_last () will determine the proper
18381 IT instruction type based on the inst.cond code. Otherwise,
18382 set_it_insn_type can be called for overriding that logic or
18383 for covering other cases.
18384
18385 Calling handle_it_state () may not transition the IT block state to
18386 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18387 still queried. Instead, if the FSM determines that the state should
18388 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18389 after the tencode () function: that's what it_fsm_post_encode () does.
18390
18391 Since in_it_block () calls the state handling function to get an
18392 updated state, an error may occur (due to invalid insns combination).
18393 In that case, inst.error is set.
18394 Therefore, inst.error has to be checked after the execution of
18395 the tencode () routine.
18396
18397 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18398 any pending state change (if any) that didn't take place in
18399 handle_it_state () as explained above. */
18400
18401 static void
18402 it_fsm_pre_encode (void)
18403 {
18404 if (inst.cond != COND_ALWAYS)
18405 inst.it_insn_type = INSIDE_IT_INSN;
18406 else
18407 inst.it_insn_type = OUTSIDE_IT_INSN;
18408
18409 now_it.state_handled = 0;
18410 }
18411
/* IT state FSM handling function.  Returns SUCCESS, or FAIL (setting
   inst.error) when the current instruction is not allowed in the
   current IT context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Close the block, and for plain conditionals start a new
		 one; IF_INSIDE_IT_LAST_INSN insns must stand alone.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18575
/* A 16-bit instruction pattern (PATTERN under MASK) whose use inside an
   IT block is performance-deprecated; DESCRIPTION names the class for
   the diagnostic.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};

/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check
     happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18597
/* Commit any pending IT FSM state change after the instruction has been
   encoded, issuing ARMv8 performance-deprecation warnings for the IT
   block where applicable.  See the rationale comment above
   it_fsm_pre_encode.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      /* Encodings above 0xffff are 32-bit Thumb instructions.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of 0x10 means the last slot of the block has been used.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18654
18655 static void
18656 force_automatic_it_block_close (void)
18657 {
18658 if (now_it.state == AUTOMATIC_IT_BLOCK)
18659 {
18660 close_automatic_it_block ();
18661 now_it.state = OUTSIDE_IT_BLOCK;
18662 now_it.mask = 0;
18663 }
18664 }
18665
18666 static int
18667 in_it_block (void)
18668 {
18669 if (!now_it.state_handled)
18670 handle_it_state ();
18671
18672 return now_it.state != OUTSIDE_IT_BLOCK;
18673 }
18674
18675 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18676 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18677 here, hence the "known" in the function name. */
18678
18679 static bfd_boolean
18680 known_t32_only_insn (const struct asm_opcode *opcode)
18681 {
18682 /* Original Thumb-1 wide instruction. */
18683 if (opcode->tencode == do_t_blx
18684 || opcode->tencode == do_t_branch23
18685 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18686 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18687 return TRUE;
18688
18689 /* Wide-only instruction added to ARMv8-M Baseline. */
18690 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18691 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18692 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18693 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18694 return TRUE;
18695
18696 return FALSE;
18697 }
18698
18699 /* Whether wide instruction variant can be used if available for a valid OPCODE
18700 in ARCH. */
18701
18702 static bfd_boolean
18703 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18704 {
18705 if (known_t32_only_insn (opcode))
18706 return TRUE;
18707
18708 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18709 of variant T3 of B.W is checked in do_t_branch. */
18710 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18711 && opcode->tencode == do_t_branch)
18712 return TRUE;
18713
18714 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18715 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18716 && opcode->tencode == do_t_mov_cmp
18717 /* Make sure CMP instruction is not affected. */
18718 && opcode->aencode == do_mov)
18719 return TRUE;
18720
18721 /* Wide instruction variants of all instructions with narrow *and* wide
18722 variants become available with ARMv6t2. Other opcodes are either
18723 narrow-only or wide-only and are thus available if OPCODE is valid. */
18724 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18725 return TRUE;
18726
18727 /* OPCODE with narrow only instruction variant or wide variant not
18728 available. */
18729 return FALSE;
18730 }
18731
/* Assemble one source line.  STR holds the full statement; the mnemonic
   is looked up, operands are parsed, and the instruction is encoded for
   the current mode (Thumb or ARM) and emitted via output_inst.  All
   diagnostics go through as_bad/as_tsktsk; on error the frag is left
   untouched.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800-0xffff is the first halfword of a 32-bit encoding, so a
	     value in that range cannot be a complete 16-bit instruction.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18924
/* End-of-assembly check: warn about any manually-opened IT block (via an
   explicit IT instruction) that was never completed.  For ELF the IT
   state is tracked per section, so every section is inspected.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18943
18944 /* Various frobbings of labels and their addresses. */
18945
/* Called at the start of every source line: forget the label recorded on
   the previous line so md_assemble only re-anchors labels defined on the
   current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18951
/* Hook run whenever a label SYM is defined.  Remembers the label so
   md_assemble can re-anchor it to the next instruction, tags it with the
   current ARM/Thumb and interworking state, closes any open automatic IT
   block, honours a pending .thumb_func, and emits dwarf line info.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
19010
19011 bfd_boolean
19012 arm_data_in_code (void)
19013 {
19014 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19015 {
19016 *input_line_pointer = '/';
19017 input_line_pointer += 5;
19018 *input_line_pointer = 0;
19019 return TRUE;
19020 }
19021
19022 return FALSE;
19023 }
19024
19025 char *
19026 arm_canonicalize_symbol_name (char * name)
19027 {
19028 int len;
19029
19030 if (thumb_mode && (len = strlen (name)) > 5
19031 && streq (name + len - 5, "/data"))
19032 *(name + len - 5) = 0;
19033
19034 return name;
19035 }
19036 \f
19037 /* Table of all register names defined by default. The user can
19038 define additional names with .req. Note that all register names
19039 should appear in both upper and lowercase variants. Some registers
19040 also have mixed-case names. */
19041
/* REGDEF(name, number, type) builds one reg_entry; REGNUM pastes the
   number onto a prefix; REGNUM2 doubles the number (Neon Q registers map
   onto even D registers); REGSET/REGSETH expand 16 consecutive entries;
   SPLRBANK builds the banked LR/SP/SPSR triple for one processor mode.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  Encodings use 512|(n<<16) for MRS/MSR-style
     banked-register numbers, 768|... for the hyp/monitor group.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only three of the seven helper macros are #undef'd here;
   REGNUM2, REGSETH, REGSET2 and SPLRBANK stay defined — confirm nothing
   later in the file relies on (or collides with) them.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
19186
/* Table of all PSR suffixes (the field-selector part of e.g. CPSR_fsxc).
   Bare "CPSR" and "SPSR" are handled within psr_required_here.  Every
   permutation of the f/s/x/c field letters is listed so any order the
   programmer writes is accepted.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19265
/* Table of V7M psr names, mapping each special-register name to its
   MRS/MSR SYSm encoding.  The _NS variants (0x80 bit set) are the
   ARMv8-M Security Extension non-secure aliases.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
19296
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl" (arithmetic and logical left shifts are identical).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19307
/* Table of all explicit relocation names.  Each relocation is listed in
   both lower- and upper-case spellings; parse code matches these names
   against the `#:NAME:' / `(NAME)' operand syntax.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fix: the upper-case alias of "gottpoff_fdpic" was misspelled
     "GOTTPOFF_FDIC", so the correct spelling was never recognized.  The
     misspelled entry is kept below for backward compatibility with any
     source that happened to use it.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
  { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
19341
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  "hs" is a synonym for "cs" (carry set) and "lo"/"ul" for "cc"
   (carry clear).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19361
/* UL_BARRIER expands to a lower-case and an upper-case entry for one
   barrier option: the 4-bit CODE used in the DMB/DSB option field, gated
   on the architecture extension FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19387
19388 /* Table of ARM-format instructions. */
19389
19390 /* Macros for gluing together operand strings. N.B. In all cases
19391 other than OPS0, the trailing OP_stop comes from default
19392 zero-initialization of the unspecified elements of the array. */
19393 #define OPS0() { OP_stop, }
19394 #define OPS1(a) { OP_##a, }
19395 #define OPS2(a,b) { OP_##a,OP_##b, }
19396 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
19397 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
19398 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19399 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19400
19401 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19402 This is useful when mixing operands for ARM and THUMB, i.e. using the
19403 MIX_ARM_THUMB_OPERANDS macro.
19404 In order to use these macros, prefix the number of operands with _
19405 e.g. _3. */
19406 #define OPS_1(a) { a, }
19407 #define OPS_2(a,b) { a,b, }
19408 #define OPS_3(a,b,c) { a,b,c, }
19409 #define OPS_4(a,b,c,d) { a,b,c,d, }
19410 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
19411 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19412
19413 /* These macros abstract out the exact format of the mnemonic table and
19414 save some repeated characters. */
19415
19416 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19417 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19418 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19419 THUMB_VARIANT, do_##ae, do_##te }
19420
19421 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19422 a T_MNEM_xyz enumerator. */
19423 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19424 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19425 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19426 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19427
19428 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19429 infix after the third character. */
19430 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19431 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19432 THUMB_VARIANT, do_##ae, do_##te }
19433 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19434 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19435 THUMB_VARIANT, do_##ae, do_##te }
19436 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19437 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19438 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19439 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19440 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19441 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19442 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19443 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19444
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
field is still 0xE.  Many of the Thumb variants can be executed
conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
{ mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
Used by mnemonics that have very minimal differences in the encoding for
ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
{ mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
condition code field (tagged OT_unconditionalF rather than
OT_unconditional).  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
{ mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
THUMB_VARIANT, do_##ae, do_##te }
19464
/* ARM-only variants of all the above.  The Thumb fields are zero/NULL.
Note: CE expects MNEM as a quoted string, whereas C3 stringizes a bare
identifier via #mnem (compare the CE("...") and C3(xyz, ...) call sites
in the table below).  */
#define CE(mnem, op, nops, ops, ae) \
{ mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
{ #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19471
/* Thumb-only variants of TCE and TUE.  The ARM opcode field is 0x0, the
ARM variant slot is 0 and the ARM encoding function is NULL - there is
no ARM form of these entries.  */
#define ToC(mnem, top, nops, ops, te) \
{ mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
do_##te }

#define ToU(mnem, top, nops, ops, te) \
{ mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
NULL, do_##te }
19480
/* Legacy mnemonics that always have conditional infix after the third
character.  ARM-only (Thumb opcode 0x0, variant 0, encoder NULL).  */
#define CL(mnem, op, nops, ops, ae) \
{ mnem, OPS##nops ops, OT_cinfix3_legacy, \
0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19486
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
Thumb opcode "0xe##op" is the same hex digits as the ARM opcode with a
leading 0xe (the always-true condition) pasted on, and both modes share
the ARM variant and encoding function.  */
#define cCE(mnem, op, nops, ops, ae) \
{ mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
suffix are ambiguous.  For consistency this includes all FPA instructions,
not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
{ mnem, OPS##nops ops, OT_cinfix3_legacy, \
0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
(for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
{ mnem, OPS##nops ops, OT_csuf_or_in3, \
0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19503
/* Build one entry whose mnemonic is M1 with the condition-code infix M2
inserted before the trailing part M3.  When M2 is empty, the stringized
argument #m2 is "" and sizeof (#m2) == 1 (just the NUL), selecting the
OT_odd_infix_unc tag; otherwise the infix position - one past the end
of M1 - is folded into the tag as OT_odd_infix_0 + sizeof (m1) - 1.
ARM-only (Thumb opcode 0x0, variant 0, encoder NULL).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
{ m1 #m2 m3, OPS##nops ops, \
sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one xCM_ entry per condition-code infix, plus the bare
(uninfixed) form, so a single CM line covers every conditional spelling
of the mnemonic.  */
#define CM(m1, m2, op, nops, ops, ae) \
xCM_ (m1, , m2, op, nops, ops, ae), \
xCM_ (m1, eq, m2, op, nops, ops, ae), \
xCM_ (m1, ne, m2, op, nops, ops, ae), \
xCM_ (m1, cs, m2, op, nops, ops, ae), \
xCM_ (m1, hs, m2, op, nops, ops, ae), \
xCM_ (m1, cc, m2, op, nops, ops, ae), \
xCM_ (m1, ul, m2, op, nops, ops, ae), \
xCM_ (m1, lo, m2, op, nops, ops, ae), \
xCM_ (m1, mi, m2, op, nops, ops, ae), \
xCM_ (m1, pl, m2, op, nops, ops, ae), \
xCM_ (m1, vs, m2, op, nops, ops, ae), \
xCM_ (m1, vc, m2, op, nops, ops, ae), \
xCM_ (m1, hi, m2, op, nops, ops, ae), \
xCM_ (m1, ls, m2, op, nops, ops, ae), \
xCM_ (m1, ge, m2, op, nops, ops, ae), \
xCM_ (m1, lt, m2, op, nops, ops, ae), \
xCM_ (m1, gt, m2, op, nops, ops, ae), \
xCM_ (m1, le, m2, op, nops, ops, ae), \
xCM_ (m1, al, m2, op, nops, ops, ae)
19529
/* ARM-only unconditional (UE) and cond=0xF (UF) mnemonics; the bare
identifier MNEM is stringized with #mnem.  No Thumb variant.  */
#define UE(mnem, op, nops, ops, ae) \
{ #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
{ #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19535
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
{ #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
the various overloaded versions of opcodes.  Both opcode fields hold an
N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc) \
{ #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19548
/* Neon insn with conditional suffix for the ARM version, non-overloaded
version.  NCE and NCEF below differ only in the operand-type tag they
pass through (OT_csuffix versus OT_csuffixF).  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
{ #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.
As NCE_tag, but both opcode fields hold N_MNEM_xyz enumerators.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
{ #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19571
/* Lets a table entry give 0 as its encoding function: do_##0 then
   expands to do_0, i.e. 0 (no encoder), as used by the Thumb-only
   entries below that pass 0 for the ARM encoder.  */
#define do_0 0
19573
19574 static const struct asm_opcode insns[] =
19575 {
19576 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19577 #define THUMB_VARIANT & arm_ext_v4t
19578 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19579 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19580 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19581 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19582 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19583 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19584 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19585 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19586 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19587 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19588 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19589 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19590 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19591 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19592 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19593 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19594
19595 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19596 for setting PSR flag bits. They are obsolete in V6 and do not
19597 have Thumb equivalents. */
19598 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19599 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19600 CL("tstp", 110f000, 2, (RR, SH), cmp),
19601 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19602 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19603 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19604 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19605 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19606 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19607
19608 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19609 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19610 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19611 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19612
19613 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19614 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19615 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19616 OP_RRnpc),
19617 OP_ADDRGLDR),ldst, t_ldst),
19618 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19619
19620 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19621 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19622 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19623 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19624 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19625 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19626
19627 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19628 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19629
19630 /* Pseudo ops. */
19631 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19632 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19633 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19634 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19635
19636 /* Thumb-compatibility pseudo ops. */
19637 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19638 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19639 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19640 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19641 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19642 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19643 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19644 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19645 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19646 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19647 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19648 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19649
19650 /* These may simplify to neg. */
19651 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19652 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19653
19654 #undef THUMB_VARIANT
19655 #define THUMB_VARIANT & arm_ext_os
19656
19657 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19658 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19659
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_v6
19662
19663 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19664
19665 /* V1 instructions with no Thumb analogue prior to V6T2. */
19666 #undef THUMB_VARIANT
19667 #define THUMB_VARIANT & arm_ext_v6t2
19668
19669 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19670 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19671 CL("teqp", 130f000, 2, (RR, SH), cmp),
19672
19673 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19674 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19675 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19676 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19677
19678 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19679 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19680
19681 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19682 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19683
19684 /* V1 instructions with no Thumb analogue at all. */
19685 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19686 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19687
19688 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19689 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19690 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19691 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19692 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19693 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19694 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19695 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19696
19697 #undef ARM_VARIANT
19698 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19699 #undef THUMB_VARIANT
19700 #define THUMB_VARIANT & arm_ext_v4t
19701
19702 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19703 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19704
19705 #undef THUMB_VARIANT
19706 #define THUMB_VARIANT & arm_ext_v6t2
19707
19708 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19709 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19710
19711 /* Generic coprocessor instructions. */
19712 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19713 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19714 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19715 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19716 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19717 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19718 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19719
19720 #undef ARM_VARIANT
19721 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19722
19723 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19724 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19725
19726 #undef ARM_VARIANT
19727 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19728 #undef THUMB_VARIANT
19729 #define THUMB_VARIANT & arm_ext_msr
19730
19731 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19732 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19733
19734 #undef ARM_VARIANT
19735 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19736 #undef THUMB_VARIANT
19737 #define THUMB_VARIANT & arm_ext_v6t2
19738
19739 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19740 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19741 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19742 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19743 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19744 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19745 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19746 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19747
19748 #undef ARM_VARIANT
19749 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19750 #undef THUMB_VARIANT
19751 #define THUMB_VARIANT & arm_ext_v4t
19752
19753 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19754 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19755 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19756 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19757 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19758 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19759
19760 #undef ARM_VARIANT
19761 #define ARM_VARIANT & arm_ext_v4t_5
19762
19763 /* ARM Architecture 4T. */
19764 /* Note: bx (and blx) are required on V5, even if the processor does
19765 not support Thumb. */
19766 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19767
19768 #undef ARM_VARIANT
19769 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19770 #undef THUMB_VARIANT
19771 #define THUMB_VARIANT & arm_ext_v5t
19772
19773 /* Note: blx has 2 variants; the .value coded here is for
19774 BLX(2). Only this variant has conditional execution. */
19775 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19776 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19777
19778 #undef THUMB_VARIANT
19779 #define THUMB_VARIANT & arm_ext_v6t2
19780
19781 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19782 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19783 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19784 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19785 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19786 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19787 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19788 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19789
19790 #undef ARM_VARIANT
19791 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19792 #undef THUMB_VARIANT
19793 #define THUMB_VARIANT & arm_ext_v5exp
19794
19795 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19796 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19797 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19798 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19799
19800 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19801 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19802
19803 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19804 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19805 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19806 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19807
19808 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19809 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19810 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19811 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19812
19813 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19814 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19815
19816 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19817 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19818 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19819 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19820
19821 #undef ARM_VARIANT
19822 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19823 #undef THUMB_VARIANT
19824 #define THUMB_VARIANT & arm_ext_v6t2
19825
19826 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19827 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19828 ldrd, t_ldstd),
19829 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19830 ADDRGLDRS), ldrd, t_ldstd),
19831
19832 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19833 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19834
19835 #undef ARM_VARIANT
19836 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19837
19838 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19839
19840 #undef ARM_VARIANT
19841 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19842 #undef THUMB_VARIANT
19843 #define THUMB_VARIANT & arm_ext_v6
19844
19845 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19846 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19847 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19848 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19849 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19850 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19851 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19852 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19853 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19854 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19855
19856 #undef THUMB_VARIANT
19857 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19858
19859 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19860 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19861 strex, t_strex),
19862 #undef THUMB_VARIANT
19863 #define THUMB_VARIANT & arm_ext_v6t2
19864
19865 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19866 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19867
19868 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19869 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19870
19871 /* ARM V6 not included in V7M. */
19872 #undef THUMB_VARIANT
19873 #define THUMB_VARIANT & arm_ext_v6_notm
19874 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19875 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19876 UF(rfeib, 9900a00, 1, (RRw), rfe),
19877 UF(rfeda, 8100a00, 1, (RRw), rfe),
19878 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19879 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19880 UF(rfefa, 8100a00, 1, (RRw), rfe),
19881 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19882 UF(rfeed, 9900a00, 1, (RRw), rfe),
19883 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19884 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19885 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19886 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19887 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19888 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19889 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19890 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19891 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19892 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19893
19894 /* ARM V6 not included in V7M (eg. integer SIMD). */
19895 #undef THUMB_VARIANT
19896 #define THUMB_VARIANT & arm_ext_v6_dsp
19897 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19898 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19899 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19900 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19901 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19902 /* Old name for QASX. */
19903 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19904 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19905 /* Old name for QSAX. */
19906 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19907 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19908 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19909 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19910 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19911 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19912 /* Old name for SASX. */
19913 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19914 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19915 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19916 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19917 /* Old name for SHASX. */
19918 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19919 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19920 /* Old name for SHSAX. */
19921 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19922 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19923 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19924 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19925 /* Old name for SSAX. */
19926 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19927 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19928 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19929 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19930 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19931 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19932 /* Old name for UASX. */
19933 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19934 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19935 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19936 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19937 /* Old name for UHASX. */
19938 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19939 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19940 /* Old name for UHSAX. */
19941 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19942 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19943 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19944 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19945 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19946 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19947 /* Old name for UQASX. */
19948 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19949 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19950 /* Old name for UQSAX. */
19951 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19952 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19953 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19954 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19955 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19956 /* Old name for USAX. */
19957 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19958 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19959 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19960 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19961 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19962 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19963 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19964 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19965 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19966 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19967 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19968 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19969 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19970 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19971 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19972 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19973 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19974 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19975 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19976 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19977 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19978 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19979 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19980 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19981 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19982 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19983 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19984 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19985 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19986 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19987 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19988 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19989 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19990 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19991
19992 #undef ARM_VARIANT
19993 #define ARM_VARIANT & arm_ext_v6k
19994 #undef THUMB_VARIANT
19995 #define THUMB_VARIANT & arm_ext_v6k
19996
19997 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19998 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19999 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20000 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20001
20002 #undef THUMB_VARIANT
20003 #define THUMB_VARIANT & arm_ext_v6_notm
20004 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20005 ldrexd, t_ldrexd),
20006 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20007 RRnpcb), strexd, t_strexd),
20008
20009 #undef THUMB_VARIANT
20010 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20011 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20012 rd_rn, rd_rn),
20013 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20014 rd_rn, rd_rn),
20015 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20016 strex, t_strexbh),
20017 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20018 strex, t_strexbh),
20019 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20020
20021 #undef ARM_VARIANT
20022 #define ARM_VARIANT & arm_ext_sec
20023 #undef THUMB_VARIANT
20024 #define THUMB_VARIANT & arm_ext_sec
20025
20026 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20027
20028 #undef ARM_VARIANT
20029 #define ARM_VARIANT & arm_ext_virt
20030 #undef THUMB_VARIANT
20031 #define THUMB_VARIANT & arm_ext_virt
20032
20033 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20034 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20035
20036 #undef ARM_VARIANT
20037 #define ARM_VARIANT & arm_ext_pan
20038 #undef THUMB_VARIANT
20039 #define THUMB_VARIANT & arm_ext_pan
20040
20041 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20042
20043 #undef ARM_VARIANT
20044 #define ARM_VARIANT & arm_ext_v6t2
20045 #undef THUMB_VARIANT
20046 #define THUMB_VARIANT & arm_ext_v6t2
20047
20048 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20049 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20050 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20051 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20052
20053 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20054 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20055
20056 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20057 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20058 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20059 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20060
20061 #undef ARM_VARIANT
20062 #define ARM_VARIANT & arm_ext_v3
20063 #undef THUMB_VARIANT
20064 #define THUMB_VARIANT & arm_ext_v6t2
20065
20066 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20067 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20068 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20069
20070 #undef ARM_VARIANT
20071 #define ARM_VARIANT & arm_ext_v6t2
20072 #undef THUMB_VARIANT
20073 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20074 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20075 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20076
20077 /* Thumb-only instructions. */
20078 #undef ARM_VARIANT
20079 #define ARM_VARIANT NULL
20080 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20081 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20082
20083 /* ARM does not really have an IT instruction, so always allow it.
20084 The opcode is copied from Thumb in order to allow warnings in
20085 -mimplicit-it=[never | arm] modes. */
20086 #undef ARM_VARIANT
20087 #define ARM_VARIANT & arm_ext_v1
20088 #undef THUMB_VARIANT
20089 #define THUMB_VARIANT & arm_ext_v6t2
20090
20091 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20092 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20093 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20094 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20095 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20096 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20097 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20098 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20099 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20100 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20101 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20102 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20103 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20104 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20105 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20106 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20107 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20108 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20109
20110 /* Thumb2 only instructions. */
20111 #undef ARM_VARIANT
20112 #define ARM_VARIANT NULL
20113
20114 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20115 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20116 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20117 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20118 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20119 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20120
20121 /* Hardware division instructions. */
20122 #undef ARM_VARIANT
20123 #define ARM_VARIANT & arm_ext_adiv
20124 #undef THUMB_VARIANT
20125 #define THUMB_VARIANT & arm_ext_div
20126
20127 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20128 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20129
20130 /* ARM V6M/V7 instructions. */
20131 #undef ARM_VARIANT
20132 #define ARM_VARIANT & arm_ext_barrier
20133 #undef THUMB_VARIANT
20134 #define THUMB_VARIANT & arm_ext_barrier
20135
20136 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20137 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20138 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20139
20140 /* ARM V7 instructions. */
20141 #undef ARM_VARIANT
20142 #define ARM_VARIANT & arm_ext_v7
20143 #undef THUMB_VARIANT
20144 #define THUMB_VARIANT & arm_ext_v7
20145
20146 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20147 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20148
20149 #undef ARM_VARIANT
20150 #define ARM_VARIANT & arm_ext_mp
20151 #undef THUMB_VARIANT
20152 #define THUMB_VARIANT & arm_ext_mp
20153
20154 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20155
 /* ARMv8 instructions.  */
20157 #undef ARM_VARIANT
20158 #define ARM_VARIANT & arm_ext_v8
20159
20160 /* Instructions shared between armv8-a and armv8-m. */
20161 #undef THUMB_VARIANT
20162 #define THUMB_VARIANT & arm_ext_atomics
20163
20164 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20165 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20166 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20167 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20168 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20169 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20170 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20171 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20172 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20173 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20174 stlex, t_stlex),
20175 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20176 stlex, t_stlex),
20177 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20178 stlex, t_stlex),
20179 #undef THUMB_VARIANT
20180 #define THUMB_VARIANT & arm_ext_v8
20181
20182 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20183 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20184 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20185 ldrexd, t_ldrexd),
20186 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20187 strexd, t_strexd),
20188 /* ARMv8 T32 only. */
20189 #undef ARM_VARIANT
20190 #define ARM_VARIANT NULL
20191 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20192 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20193 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20194
20195 /* FP for ARMv8. */
20196 #undef ARM_VARIANT
20197 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20198 #undef THUMB_VARIANT
20199 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20200
20201 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20202 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20203 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20204 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20205 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20206 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20207 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20208 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20209 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20210 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20211 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20212 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20213 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20214 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20215 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20216 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20217 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20218
20219 /* Crypto v1 extensions. */
20220 #undef ARM_VARIANT
20221 #define ARM_VARIANT & fpu_crypto_ext_armv8
20222 #undef THUMB_VARIANT
20223 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20224
20225 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20226 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20227 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20228 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20229 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20230 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20231 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20232 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20233 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20234 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20235 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20236 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20237 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20238 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20239
20240 #undef ARM_VARIANT
20241 #define ARM_VARIANT & crc_ext_armv8
20242 #undef THUMB_VARIANT
20243 #define THUMB_VARIANT & crc_ext_armv8
20244 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20245 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20246 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20247 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20248 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20249 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20250
20251 /* ARMv8.2 RAS extension. */
20252 #undef ARM_VARIANT
20253 #define ARM_VARIANT & arm_ext_ras
20254 #undef THUMB_VARIANT
20255 #define THUMB_VARIANT & arm_ext_ras
20256 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20257
20258 #undef ARM_VARIANT
20259 #define ARM_VARIANT & arm_ext_v8_3
20260 #undef THUMB_VARIANT
20261 #define THUMB_VARIANT & arm_ext_v8_3
20262 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20263 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20264 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20265
20266 #undef ARM_VARIANT
20267 #define ARM_VARIANT & fpu_neon_ext_dotprod
20268 #undef THUMB_VARIANT
20269 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20270 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20271 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20272
20273 #undef ARM_VARIANT
20274 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20275 #undef THUMB_VARIANT
20276 #define THUMB_VARIANT NULL
20277
20278 cCE("wfs", e200110, 1, (RR), rd),
20279 cCE("rfs", e300110, 1, (RR), rd),
20280 cCE("wfc", e400110, 1, (RR), rd),
20281 cCE("rfc", e500110, 1, (RR), rd),
20282
20283 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20284 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20285 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20286 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20287
20288 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20289 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20290 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20291 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20292
20293 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20294 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20295 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20296 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20297 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20298 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20299 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20300 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20301 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20302 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20303 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20304 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20305
20306 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20307 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20308 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20309 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20310 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20311 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20312 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20313 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20314 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20315 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20316 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20317 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20318
20319 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20320 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20321 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20322 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20323 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20324 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20325 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20326 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20327 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20328 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20329 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20330 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20331
20332 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20333 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20334 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20335 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20336 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20337 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20338 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20339 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20340 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20341 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20342 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20343 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20344
20345 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20346 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20347 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20348 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20349 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20350 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20351 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20352 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20353 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20354 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20355 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20356 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20357
20358 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20359 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20360 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20361 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20362 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20363 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20364 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20365 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20366 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20367 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20368 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20369 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20370
20371 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20372 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20373 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20374 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20375 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20376 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20377 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20378 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20379 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20380 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20381 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20382 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20383
20384 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20385 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20386 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20387 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20388 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20389 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20390 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20391 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20392 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20393 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20394 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20395 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20396
20397 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20398 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20399 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20400 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20401 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20402 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20403 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20404 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20405 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20406 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20407 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20408 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20409
20410 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20411 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20412 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20413 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20414 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20415 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20416 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20417 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20418 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20419 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20420 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20421 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20422
20423 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20424 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20425 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20426 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20427 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20428 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20429 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20430 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20431 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20432 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20433 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20434 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20435
20436 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20437 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20438 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20439 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20440 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20441 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20442 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20443 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20444 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20445 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20446 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20447 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20448
20449 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20450 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20451 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20452 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20453 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20454 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20455 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20456 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20457 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20458 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20459 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20460 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20461
20462 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20463 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20464 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20465 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20466 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20467 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20468 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20469 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20470 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20471 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20472 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20473 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20474
20475 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20476 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20477 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20478 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20479 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20480 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20481 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20482 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20483 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20484 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20485 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20486 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20487
20488 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20489 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20490 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20491 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20492 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20493 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20494 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20495 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20496 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20497 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20498 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20499 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20500
20501 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20502 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20503 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20504 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20505 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20506 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20507 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20508 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20509 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20510 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20511 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20512 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20513
20514 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20515 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20516 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20517 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20518 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20519 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20520 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20521 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20522 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20523 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20524 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20525 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20526
20527 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20528 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20529 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20530 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20531 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20532 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20533 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20534 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20535 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20536 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20537 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20538 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20539
20540 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20541 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20542 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20543 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20544 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20545 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20546 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20547 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20548 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20549 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20550 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20551 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20552
20553 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20554 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20555 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20556 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20557 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20558 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20559 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20560 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20561 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20562 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20563 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20564 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20565
20566 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20567 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20568 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20569 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20570 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20571 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20572 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20573 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20574 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20575 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20576 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20577 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20578
20579 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20580 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20581 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20582 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20583 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20584 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20585 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20586 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20587 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20588 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20589 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20590 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20591
20592 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20593 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20594 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20595 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20596 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20597 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20598 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20599 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20600 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20601 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20602 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20603 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20604
20605 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20606 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20607 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20608 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20609 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20610 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20611 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20612 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20613 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20614 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20615 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20616 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20617
20618 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20619 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20620 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20621 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20622 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20623 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20624 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20625 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20626 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20627 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20628 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20629 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20630
20631 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20632 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20633 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20634 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20635 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20636 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20637 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20638 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20639 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20640 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20641 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20642 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20643
20644 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20645 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20646 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20647 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20648 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20649 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20650 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20651 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20652 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20653 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20654 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20655 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20656
20657 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20658 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20659 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20660 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20661 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20662 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20663 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20664 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20665 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20666 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20667 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20668 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20669
20670 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20671 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20672 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20673 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20674
20675 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20676 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20677 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20678 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20679 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20680 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20681 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20682 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20683 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20684 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20685 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20686 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20687
20688 /* The implementation of the FIX instruction is broken on some
20689 assemblers, in that it accepts a precision specifier as well as a
20690 rounding specifier, despite the fact that this is meaningless.
20691 To be more compatible, we accept it as well, though of course it
20692 does not set any bits. */
20693 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20694 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20695 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20696 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20697 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20698 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20699 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20700 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20701 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20702 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20703 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20704 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20705 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20706
20707 /* Instructions that were new with the real FPA, call them V2. */
20708 #undef ARM_VARIANT
20709 #define ARM_VARIANT & fpu_fpa_ext_v2
20710
20711 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20712 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20713 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20714 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20715 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20716 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20717
20718 #undef ARM_VARIANT
20719 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20720
20721 /* Moves and type conversions. */
20722 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20723 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20724 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20725 cCE("fmstat", ef1fa10, 0, (), noargs),
20726 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20727 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20728 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20729 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20730 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20731 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20732 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20733 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20734 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20735 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20736
20737 /* Memory operations. */
20738 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20739 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20740 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20741 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20742 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20743 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20744 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20745 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20746 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20747 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20748 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20749 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20750 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20751 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20752 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20753 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20754 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20755 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20756
20757 /* Monadic operations. */
20758 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20759 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20760 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20761
20762 /* Dyadic operations. */
20763 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20764 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20765 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20766 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20767 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20768 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20769 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20770 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20771 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20772
20773 /* Comparisons. */
20774 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20775 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20776 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20777 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20778
20779 /* Double precision load/store are still present on single precision
20780 implementations. */
20781 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20782 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20783 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20784 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20785 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20786 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20787 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20788 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20789 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20790 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20791
20792 #undef ARM_VARIANT
20793 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20794
20795 /* Moves and type conversions. */
20796 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20797 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20798 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20799 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20800 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20801 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20802 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20803 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20804 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20805 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20806 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20807 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20808 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20809
20810 /* Monadic operations. */
20811 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20812 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20813 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20814
20815 /* Dyadic operations. */
20816 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20817 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20818 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20819 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20820 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20821 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20822 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20823 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20824 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20825
20826 /* Comparisons. */
20827 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20828 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20829 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20830 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20831
20832 #undef ARM_VARIANT
20833 #define ARM_VARIANT & fpu_vfp_ext_v2
20834
20835 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20836 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20837 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20838 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20839
20840 /* Instructions which may belong to either the Neon or VFP instruction sets.
20841 Individual encoder functions perform additional architecture checks. */
20842 #undef ARM_VARIANT
20843 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20844 #undef THUMB_VARIANT
20845 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20846
20847 /* These mnemonics are unique to VFP. */
20848 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20849 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20850 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20851 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20852 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20853 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20854 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20855 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20856 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20857 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20858
20859 /* Mnemonics shared by Neon and VFP. */
20860 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20861 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20862 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20863
20864 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20865 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20866
20867 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20868 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20869
20870 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20871 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20872 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20873 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20874 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20875 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20876 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20877 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20878
20879 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20880 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20881 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20882 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20883
20884
20885 /* NOTE: All VMOV encoding is special-cased! */
20886 NCE(vmov, 0, 1, (VMOV), neon_mov),
20887 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20888
20889 #undef ARM_VARIANT
20890 #define ARM_VARIANT & arm_ext_fp16
20891 #undef THUMB_VARIANT
20892 #define THUMB_VARIANT & arm_ext_fp16
20893 /* New instructions added from v8.2, allowing the extraction and insertion of
20894 the upper 16 bits of a 32-bit vector register. */
20895 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20896 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20897
20898 /* New backported fma/fms instructions optional in v8.2. */
20899 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
20900 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
20901
20902 #undef THUMB_VARIANT
20903 #define THUMB_VARIANT & fpu_neon_ext_v1
20904 #undef ARM_VARIANT
20905 #define ARM_VARIANT & fpu_neon_ext_v1
20906
20907 /* Data processing with three registers of the same length. */
20908 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20909 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20910 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20911 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20912 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20913 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20914 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20915 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20916 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20917 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20918 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20919 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20920 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20921 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20922 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20923 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20924 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20925 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20926 /* If not immediate, fall back to neon_dyadic_i64_su.
20927 shl_imm should accept I8 I16 I32 I64,
20928 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20929 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20930 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20931 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20932 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20933 /* Logic ops, types optional & ignored. */
20934 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20935 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20936 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20937 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20938 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20939 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20940 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20941 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20942 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20943 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20944 /* Bitfield ops, untyped. */
20945 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20946 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20947 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20948 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20949 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20950 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20951 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20952 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20953 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20954 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20955 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20956 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20957 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20958 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20959 back to neon_dyadic_if_su. */
20960 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20961 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20962 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20963 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20964 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20965 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20966 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20967 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20968 /* Comparison. Type I8 I16 I32 F32. */
20969 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20970 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20971 /* As above, D registers only. */
20972 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20973 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20974 /* Int and float variants, signedness unimportant. */
20975 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20976 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20977 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20978 /* Add/sub take types I8 I16 I32 I64 F32. */
20979 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20980 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20981 /* vtst takes sizes 8, 16, 32. */
20982 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20983 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20984 /* VMUL takes I8 I16 I32 F32 P8. */
20985 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20986 /* VQD{R}MULH takes S16 S32. */
20987 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20988 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20989 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20990 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20991 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20992 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20993 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20994 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20995 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20996 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20997 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20998 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20999 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21000 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21001 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21002 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21003 /* ARM v8.1 extension. */
21004 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21005 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21006 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21007 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21008
21009 /* Two address, int/float. Types S8 S16 S32 F32. */
21010 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21011 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21012
21013 /* Data processing with two registers and a shift amount. */
21014 /* Right shifts, and variants with rounding.
21015 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21016 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21017 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21018 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21019 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21020 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21021 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21022 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21023 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21024 /* Shift and insert. Sizes accepted 8 16 32 64. */
21025 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21026 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21027 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21028 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21029 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21030 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21031 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21032 /* Right shift immediate, saturating & narrowing, with rounding variants.
21033 Types accepted S16 S32 S64 U16 U32 U64. */
21034 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21035 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21036 /* As above, unsigned. Types accepted S16 S32 S64. */
21037 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21038 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21039 /* Right shift narrowing. Types accepted I16 I32 I64. */
21040 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21041 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21042 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21043 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21044 /* CVT with optional immediate for fixed-point variant. */
21045 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21046
21047 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21048 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21049
21050 /* Data processing, three registers of different lengths. */
21051 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21052 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21053 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21054 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21055 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21056 /* If not scalar, fall back to neon_dyadic_long.
21057 Vector types as above, scalar types S16 S32 U16 U32. */
21058 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21059 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21060 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21061 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21062 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21063 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21064 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21065 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21066 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21067 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21068 /* Saturating doubling multiplies. Types S16 S32. */
21069 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21070 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21071 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21072 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21073 S16 S32 U16 U32. */
21074 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21075
21076 /* Extract. Size 8. */
21077 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21078 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21079
21080 /* Two registers, miscellaneous. */
21081 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21082 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21083 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21084 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21085 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21086 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21087 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21088 /* Vector replicate. Sizes 8 16 32. */
21089 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21090 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21091 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21092 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21093 /* VMOVN. Types I16 I32 I64. */
21094 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21095 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21096 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21097 /* VQMOVUN. Types S16 S32 S64. */
21098 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21099 /* VZIP / VUZP. Sizes 8 16 32. */
21100 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21101 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21102 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21103 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21104 /* VQABS / VQNEG. Types S8 S16 S32. */
21105 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21106 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21107 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21108 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21109 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21110 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21111 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21112 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21113 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21114 /* Reciprocal estimates. Types U32 F16 F32. */
21115 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21116 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21117 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21118 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21119 /* VCLS. Types S8 S16 S32. */
21120 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21121 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21122 /* VCLZ. Types I8 I16 I32. */
21123 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21124 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21125 /* VCNT. Size 8. */
21126 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21127 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21128 /* Two address, untyped. */
21129 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21130 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21131 /* VTRN. Sizes 8 16 32. */
21132 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21133 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21134
21135 /* Table lookup. Size 8. */
21136 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21137 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21138
21139 #undef THUMB_VARIANT
21140 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21141 #undef ARM_VARIANT
21142 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21143
21144 /* Neon element/structure load/store. */
21145 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21146 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21147 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21148 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21149 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21150 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21151 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21152 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21153
21154 #undef THUMB_VARIANT
21155 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21156 #undef ARM_VARIANT
21157 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21158 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21159 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21160 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21161 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21162 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21163 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21164 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21165 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21166 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21167
21168 #undef THUMB_VARIANT
21169 #define THUMB_VARIANT & fpu_vfp_ext_v3
21170 #undef ARM_VARIANT
21171 #define ARM_VARIANT & fpu_vfp_ext_v3
21172
21173 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21174 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21175 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21176 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21177 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21178 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21179 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21180 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21181 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21182
21183 #undef ARM_VARIANT
21184 #define ARM_VARIANT & fpu_vfp_ext_fma
21185 #undef THUMB_VARIANT
21186 #define THUMB_VARIANT & fpu_vfp_ext_fma
21187 /* Mnemonics shared by Neon and VFP. These are included in the
21188 VFP FMA variant; NEON and VFP FMA always includes the NEON
21189 FMA instructions. */
21190 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21191 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21192 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21193 the v form should always be used. */
21194 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21195 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21196 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21197 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21198 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21199 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21200
21201 #undef THUMB_VARIANT
21202 #undef ARM_VARIANT
21203 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21204
21205 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21206 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21207 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21208 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21209 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21210 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21211 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21212 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21213
21214 #undef ARM_VARIANT
21215 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21216
21217 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21218 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21219 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21220 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21221 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21222 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21223 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21224 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21225 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21226 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21227 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21228 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21229 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21230 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21231 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21232 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21233 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21234 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21235 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21236 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21237 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21238 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21239 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21240 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21241 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21242 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21243 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21244 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21245 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21246 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21247 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21248 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21249 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21250 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21251 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21252 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21253 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21254 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21255 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21256 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21257 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21258 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21259 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21260 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21261 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21262 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21263 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21264 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21265 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21266 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21267 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21268 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21269 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21270 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21271 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21272 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21273 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21274 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21275 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21276 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21277 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21278 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21279 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21280 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21281 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21282 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21283 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21284 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21285 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21286 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21287 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21288 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21289 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21290 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21291 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21292 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21293 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21294 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21295 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21296 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21297 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21298 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21299 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21300 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21301 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21302 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21303 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21304 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21305 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21306 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21307 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21308 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21309 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21310 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21311 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21312 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21313 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21314 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21315 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21316 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21317 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21318 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21319 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21320 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21321 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21322 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21323 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21324 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21325 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21326 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21327 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21328 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21329 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21330 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21331 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21332 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21333 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21334 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21335 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21336 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21337 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21338 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21339 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21340 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21341 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21342 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21343 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21344 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21345 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21346 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21347 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21348 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21349 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21350 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21351 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21352 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21353 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21354 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21355 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21356 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21357 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21358 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21359 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21360 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21361 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21362 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21363 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21364 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21365 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21366 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21367 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21368 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21369 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21370 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21371 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21372 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21373 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21374 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21375 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21376 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21377 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21378 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21379
21380 #undef ARM_VARIANT
21381 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21382
21383 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21384 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21385 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21386 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21387 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21388 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21389 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21390 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21391 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21392 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21393 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21394 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21395 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21396 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21397 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21398 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21399 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21400 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21401 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21402 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21403 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21404 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21405 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21406 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21407 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21408 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21409 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21410 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21411 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21412 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21413 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21414 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21415 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21416 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21417 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21418 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21419 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21420 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21421 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21422 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21423 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21424 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21425 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21426 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21427 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21428 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21429 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21430 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21431 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21432 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21433 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21434 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21435 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21436 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21437 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21438 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21439 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21440
21441 #undef ARM_VARIANT
21442 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21443
21444 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21445 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21446 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21447 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21448 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21449 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21450 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21451 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21452 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21453 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21454 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21455 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21456 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21457 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21458 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21459 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21460 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21461 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21462 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21463 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21464 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21465 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21466 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21467 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21468 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21469 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21470 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21471 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21472 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21473 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21474 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21475 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21476 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21477 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21478 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21479 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21480 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21481 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21482 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21483 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21484 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21485 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21486 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21487 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21488 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21489 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21490 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21491 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21492 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21493 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21494 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21495 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21496 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21497 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21498 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21499 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21500 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21501 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21502 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21503 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21504 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21505 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21506 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21507 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21508 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21509 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21510 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21511 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21512 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21513 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21514 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21515 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21516 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21517 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21518 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21519 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21520
21521 /* ARMv8.5-A instructions. */
21522 #undef ARM_VARIANT
21523 #define ARM_VARIANT & arm_ext_sb
21524 #undef THUMB_VARIANT
21525 #define THUMB_VARIANT & arm_ext_sb
21526 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
21527
21528 /* ARMv8-M instructions. */
21529 #undef ARM_VARIANT
21530 #define ARM_VARIANT NULL
21531 #undef THUMB_VARIANT
21532 #define THUMB_VARIANT & arm_ext_v8m
21533 ToU("sg", e97fe97f, 0, (), noargs),
21534 ToC("blxns", 4784, 1, (RRnpc), t_blx),
21535 ToC("bxns", 4704, 1, (RRnpc), t_bx),
21536 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
21537 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
21538 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
21539 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
21540
21541 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21542 instructions behave as nop if no VFP is present. */
21543 #undef THUMB_VARIANT
21544 #define THUMB_VARIANT & arm_ext_v8m_main
21545 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
21546 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
21547 };
21548 #undef ARM_VARIANT
21549 #undef THUMB_VARIANT
21550 #undef TCE
21551 #undef TUE
21552 #undef TUF
21553 #undef TCC
21554 #undef cCE
21555 #undef cCL
21556 #undef C3E
21557 #undef CE
21558 #undef CM
21559 #undef UE
21560 #undef UF
21561 #undef UT
21562 #undef NUF
21563 #undef nUF
21564 #undef NCE
21565 #undef nCE
21566 #undef OPS0
21567 #undef OPS1
21568 #undef OPS2
21569 #undef OPS3
21570 #undef OPS4
21571 #undef OPS5
21572 #undef OPS6
21573 #undef do_0
21574 \f
21575 /* MD interface: bits in the object file. */
21576
21577 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21578 for use in the a.out file, and stores them in the array pointed to by buf.
21579 This knows about the endian-ness of the target machine and does
21580 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21581 2 (short) and 4 (long) Floating numbers are put out as a series of
21582 LITTLENUMS (shorts, here at least). */
21583
21584 void
21585 md_number_to_chars (char * buf, valueT val, int n)
21586 {
21587 if (target_big_endian)
21588 number_to_chars_bigendian (buf, val, n);
21589 else
21590 number_to_chars_littleendian (buf, val, n);
21591 }
21592
21593 static valueT
21594 md_chars_to_number (char * buf, int n)
21595 {
21596 valueT result = 0;
21597 unsigned char * where = (unsigned char *) buf;
21598
21599 if (target_big_endian)
21600 {
21601 while (n--)
21602 {
21603 result <<= 8;
21604 result |= (*where++ & 255);
21605 }
21606 }
21607 else
21608 {
21609 while (n--)
21610 {
21611 result <<= 8;
21612 result |= (where[n] & 255);
21613 }
21614 }
21615
21616 return result;
21617 }
21618
21619 /* MD interface: Sections. */
21620
21621 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21622 that an rs_machine_dependent frag may reach. */
21623
21624 unsigned int
21625 arm_frag_max_var (fragS *fragp)
21626 {
21627 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21628 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21629
21630 Note that we generate relaxable instructions even for cases that don't
21631 really need it, like an immediate that's a trivial constant. So we're
21632 overestimating the instruction size for some of those cases. Rather
21633 than putting more intelligence here, it would probably be better to
21634 avoid generating a relaxation frag in the first place when it can be
21635 determined up front that a short instruction will suffice. */
21636
21637 gas_assert (fragp->fr_type == rs_machine_dependent);
21638 return INSN_SIZE;
21639 }
21640
21641 /* Estimate the size of a frag before relaxing. Assume everything fits in
21642 2 bytes. */
21643
21644 int
21645 md_estimate_size_before_relax (fragS * fragp,
21646 segT segtype ATTRIBUTE_UNUSED)
21647 {
21648 fragp->fr_var = 2;
21649 return 2;
21650 }
21651
/* Convert a machine dependent frag.  Called once relaxation has settled
   on a final size (fragp->fr_var == 2 or 4): where the wide form was
   chosen, rewrite the 16-bit Thumb placeholder in place as its 32-bit
   Thumb-2 equivalent, then emit the fix-up that will later fill in the
   immediate / branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction bytes sit at the start of the variable part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Re-read the narrow opcode written at assembly time; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcodes with a top nibble of 4 or 9 are the SP- and
	     PC-relative forms, which keep Rt in bits 8-10.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Register form: move Rt (bits 0-2) and Rn (bits 3-5)
		 into their 32-bit encoding positions.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): 0xc00 selects the T32 immediate-offset
	     addressing sub-encoding — confirm against the Thumb-2
	     load/store encoding tables.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the pseudo PC-relative load yields a PC-relative fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant Rd (bits 4-7 of the narrow form).  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow reloc is computed relative to Align(PC,4); bias
	     the addend to compensate.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the register at bit 8 -> bit 8; cmp/cmn need
	     it shifted up to the Rn field.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: only the reloc differs between the two
	 sizes; the offset field is filled in by the fixup.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code into the T32 cond field.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant Rd and Rn from the narrow 3-operand form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit in this encoding) selects which T32
	     immediate reloc applies.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21825
21826 /* Return the size of a relaxable immediate operand instruction.
21827 SHIFT and SIZE specify the form of the allowable immediate. */
21828 static int
21829 relax_immediate (fragS *fragp, int size, int shift)
21830 {
21831 offsetT offset;
21832 offsetT mask;
21833 offsetT low;
21834
21835 /* ??? Should be able to do better than this. */
21836 if (fragp->fr_symbol)
21837 return 4;
21838
21839 low = (1 << shift) - 1;
21840 mask = (1 << (shift + size)) - (1 << shift);
21841 offset = fragp->fr_offset;
21842 /* Force misaligned offsets to 32-bit variant. */
21843 if (offset & low)
21844 return 4;
21845 if (offset & ~mask)
21846 return 4;
21847 return 2;
21848 }
21849
/* Get the address of a symbol during relaxation.  STRETCH is the net
   number of bytes by which frags already processed on this pass have
   grown (positive) or shrunk (negative); it is used to predict where
   the symbol's frag will land if it has not been reached yet.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary this frag enforces; once it reaches zero,
		 later frags cannot move at all.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means SYM's frag lies before FRAGP (we walked off the
	 end of the chain), so it has already been placed this pass and
	 needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21899
21900 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21901 load. */
21902 static int
21903 relax_adr (fragS *fragp, asection *sec, long stretch)
21904 {
21905 addressT addr;
21906 offsetT val;
21907
21908 /* Assume worst case for symbols not known to be in the same section. */
21909 if (fragp->fr_symbol == NULL
21910 || !S_IS_DEFINED (fragp->fr_symbol)
21911 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21912 || S_IS_WEAK (fragp->fr_symbol))
21913 return 4;
21914
21915 val = relaxed_symbol_addr (fragp, stretch);
21916 addr = fragp->fr_address + fragp->fr_fix;
21917 addr = (addr + 4) & ~3;
21918 /* Force misaligned targets to 32-bit variant. */
21919 if (val & 3)
21920 return 4;
21921 val -= addr;
21922 if (val < 0 || val > 1020)
21923 return 4;
21924 return 2;
21925 }
21926
21927 /* Return the size of a relaxable add/sub immediate instruction. */
21928 static int
21929 relax_addsub (fragS *fragp, asection *sec)
21930 {
21931 char *buf;
21932 int op;
21933
21934 buf = fragp->fr_literal + fragp->fr_fix;
21935 op = bfd_get_16(sec->owner, buf);
21936 if ((op & 0xf) == ((op >> 4) & 0xf))
21937 return relax_immediate (fragp, 8, 0);
21938 else
21939 return relax_immediate (fragp, 3, 0);
21940 }
21941
21942 /* Return TRUE iff the definition of symbol S could be pre-empted
21943 (overridden) at link or load time. */
21944 static bfd_boolean
21945 symbol_preemptible (symbolS *s)
21946 {
21947 /* Weak symbols can always be pre-empted. */
21948 if (S_IS_WEAK (s))
21949 return TRUE;
21950
21951 /* Non-global symbols cannot be pre-empted. */
21952 if (! S_IS_EXTERNAL (s))
21953 return FALSE;
21954
21955 #ifdef OBJ_ELF
21956 /* In ELF, a global symbol can be marked protected, or private. In that
21957 case it can't be pre-empted (other definitions in the same link unit
21958 would violate the ODR). */
21959 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21960 return FALSE;
21961 #endif
21962
21963 /* Other global symbols might be pre-empted. */
21964 return TRUE;
21965 }
21966
21967 /* Return the size of a relaxable branch instruction. BITS is the
21968 size of the offset field in the narrow instruction. */
21969
21970 static int
21971 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21972 {
21973 addressT addr;
21974 offsetT val;
21975 offsetT limit;
21976
21977 /* Assume worst case for symbols not known to be in the same section. */
21978 if (!S_IS_DEFINED (fragp->fr_symbol)
21979 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21980 || S_IS_WEAK (fragp->fr_symbol))
21981 return 4;
21982
21983 #ifdef OBJ_ELF
21984 /* A branch to a function in ARM state will require interworking. */
21985 if (S_IS_DEFINED (fragp->fr_symbol)
21986 && ARM_IS_FUNC (fragp->fr_symbol))
21987 return 4;
21988 #endif
21989
21990 if (symbol_preemptible (fragp->fr_symbol))
21991 return 4;
21992
21993 val = relaxed_symbol_addr (fragp, stretch);
21994 addr = fragp->fr_address + fragp->fr_fix + 4;
21995 val -= addr;
21996
21997 /* Offset is a signed value *2 */
21998 limit = 1 << bits;
21999 if (val >= limit || val < -limit)
22000 return 4;
22001 return 2;
22002 }
22003
22004
22005 /* Relax a machine dependent frag. This returns the amount by which
22006 the current size of the frag should change. */
22007
22008 int
22009 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
22010 {
22011 int oldsize;
22012 int newsize;
22013
22014 oldsize = fragp->fr_var;
22015 switch (fragp->fr_subtype)
22016 {
22017 case T_MNEM_ldr_pc2:
22018 newsize = relax_adr (fragp, sec, stretch);
22019 break;
22020 case T_MNEM_ldr_pc:
22021 case T_MNEM_ldr_sp:
22022 case T_MNEM_str_sp:
22023 newsize = relax_immediate (fragp, 8, 2);
22024 break;
22025 case T_MNEM_ldr:
22026 case T_MNEM_str:
22027 newsize = relax_immediate (fragp, 5, 2);
22028 break;
22029 case T_MNEM_ldrh:
22030 case T_MNEM_strh:
22031 newsize = relax_immediate (fragp, 5, 1);
22032 break;
22033 case T_MNEM_ldrb:
22034 case T_MNEM_strb:
22035 newsize = relax_immediate (fragp, 5, 0);
22036 break;
22037 case T_MNEM_adr:
22038 newsize = relax_adr (fragp, sec, stretch);
22039 break;
22040 case T_MNEM_mov:
22041 case T_MNEM_movs:
22042 case T_MNEM_cmp:
22043 case T_MNEM_cmn:
22044 newsize = relax_immediate (fragp, 8, 0);
22045 break;
22046 case T_MNEM_b:
22047 newsize = relax_branch (fragp, sec, 11, stretch);
22048 break;
22049 case T_MNEM_bcond:
22050 newsize = relax_branch (fragp, sec, 8, stretch);
22051 break;
22052 case T_MNEM_add_sp:
22053 case T_MNEM_add_pc:
22054 newsize = relax_immediate (fragp, 8, 2);
22055 break;
22056 case T_MNEM_inc_sp:
22057 case T_MNEM_dec_sp:
22058 newsize = relax_immediate (fragp, 7, 2);
22059 break;
22060 case T_MNEM_addi:
22061 case T_MNEM_addis:
22062 case T_MNEM_subi:
22063 case T_MNEM_subis:
22064 newsize = relax_addsub (fragp, sec);
22065 break;
22066 default:
22067 abort ();
22068 }
22069
22070 fragp->fr_var = newsize;
22071 /* Freeze wide instructions that are at or before the same location as
22072 in the previous pass. This avoids infinite loops.
22073 Don't freeze them unconditionally because targets may be artificially
22074 misaligned by the expansion of preceding frags. */
22075 if (stretch <= 0 && newsize > 2)
22076 {
22077 md_convert_frag (sec->owner, sec, fragp);
22078 frag_wane (fragp);
22079 }
22080
22081 return newsize - oldsize;
22082 }
22083
22084 /* Round up a section size to the appropriate boundary. */
22085
22086 valueT
22087 md_section_align (segT segment ATTRIBUTE_UNUSED,
22088 valueT size)
22089 {
22090 return size;
22091 }
22092
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad code with no-op instructions of
   the flavour matching the frag's recorded ARM/Thumb mode, rather
   than zero bytes, zero-filling only the sub-instruction residue.  */

void
arm_handle_align (fragS * fragP)
{
  /* 32-bit ARM no-ops, indexed [has_v6k][big_endian]:
     mov r0, r0 for pre-v6k, the architected NOP for v6k+.  */
  static unsigned char const arm_noop[2][2][4] =
    {
      { /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      { /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* 16-bit Thumb no-ops, indexed [is_thumb2][big_endian]:
     mov r8, r8 for Thumb-1, the architected NOP for Thumb-2.  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      { /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      { /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 NOP.W, indexed [big_endian].  */
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): &= (not =) — presumably MAX_MEM_FOR_RS_ALIGN_CODE is
     2^n - 1, making this a modulo reduction; confirm its definition.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the no-op flavour from the ARM/Thumb mode recorded in the
     frag (the bits outside MODE_RECORDED).  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: prefer wide NOPs, keeping one narrow NOP to fix
	     up an odd half-word count.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-fill any residue smaller than one instruction, marking it as
     data for ELF mapping-symbol purposes.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Emit full-size no-ops for the rest of the padding.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22212
22213 /* Called from md_do_align. Used to create an alignment
22214 frag in a code section. */
22215
22216 void
22217 arm_frag_align_code (int n, int max)
22218 {
22219 char * p;
22220
22221 /* We assume that there will never be a requirement
22222 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22223 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22224 {
22225 char err_msg[128];
22226
22227 sprintf (err_msg,
22228 _("alignments greater than %d bytes not supported in .text sections."),
22229 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22230 as_fatal ("%s", err_msg);
22231 }
22232
22233 p = frag_var (rs_align_code,
22234 MAX_MEM_FOR_RS_ALIGN_CODE,
22235 1,
22236 (relax_substateT) max,
22237 (symbolS *) NULL,
22238 (offsetT) n,
22239 (char *) NULL);
22240 *p = 0;
22241 }
22242
22243 /* Perform target specific initialisation of a frag.
22244 Note - despite the name this initialisation is not done when the frag
22245 is created, but only when its type is assigned. A frag can be created
22246 and used a long time before its type is set, so beware of assuming that
22247 this initialisation is performed first. */
22248
22249 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  The mode is
     tagged with MODE_RECORDED so later passes can tell it has already
     been captured for this frag.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22256
22257 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED tag, leaving just the ARM/Thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fill frags count as data for mapping
	 symbol purposes.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with no-ops in the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22291
22292 /* When we change sections we need to issue a new mapping symbol. */
22293
22294 void
22295 arm_elf_change_section (void)
22296 {
22297 /* Link an unlinked unwind index table section to the .text section. */
22298 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22299 && elf_linked_to_section (now_seg) == NULL)
22300 elf_linked_to_section (now_seg) = text_section;
22301 }
22302
22303 int
22304 arm_elf_section_type (const char * str, size_t len)
22305 {
22306 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22307 return SHT_ARM_EXIDX;
22308
22309 return -1;
22310 }
22311 \f
22312 /* Code to deal with unwinding tables. */
22313
22314 static void add_unwind_adjustsp (offsetT);
22315
22316 /* Generate any deferred unwind frame offset. */
22317
22318 static void
22319 flush_pending_unwind (void)
22320 {
22321 offsetT offset;
22322
22323 offset = unwind.pending_offset;
22324 unwind.pending_offset = 0;
22325 if (offset != 0)
22326 add_unwind_adjustsp (offset);
22327 }
22328
22329 /* Add an opcode to this list for this function. Two-byte opcodes should
22330 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22331 order. */
22332
static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  /* Any new opcode invalidates a previously recorded sp restore.  */
  unwind.sp_restored = 0;

  /* Grow the opcode array by one fixed-size chunk when it is full.
     NOTE(review): a single chunk is assumed to cover LENGTH bytes;
     callers pass 1 or 2, well under ARM_OPCODE_CHUNK_SIZE -- confirm
     if larger opcodes are ever added.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant first; since the whole
     list is built in reverse order, reading it backwards later yields
     op[0] before op[1].  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
22359
22360 /* Add unwind opcodes to adjust the stack pointer. */
22361
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* The uleb128 loop below emits nothing for zero, so emit the
	   single zero byte explicitly.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit: more bytes follow.  */
	  n++;
	}
      /* Add the insn.  Bytes go in most-significant-first so that the
	 reversed list reads as 0xb2 followed by the uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  The first (0x3f) adjusts sp by the maximum
	 a single short opcode can (0x100 bytes); the second covers the
	 remainder, hence the -0x104 bias.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: encodes (offset - 4) / 4 in the low bits.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal "sp -= 0x100" opcodes (0x7f)
	 until the remainder fits one short form, then encode it with
	 the 0x40 (decrement) bit set.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22421
22422 /* Finish the list of unwind opcodes for this function. */
22423
22424 static void
22425 finish_unwind_opcodes (void)
22426 {
22427 valueT op;
22428
22429 if (unwind.fp_used)
22430 {
22431 /* Adjust sp as necessary. */
22432 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22433 flush_pending_unwind ();
22434
22435 /* After restoring sp from the frame pointer. */
22436 op = 0x90 | unwind.fp_reg;
22437 add_unwind_opcode (op, 1);
22438 }
22439 else
22440 flush_pending_unwind ();
22441 }
22442
22443
22444 /* Start an exception table entry. If idx is nonzero this is an index table
22445 entry. */
22446
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Choose the section-name prefix and ELF section type: index tables
     get SHT_ARM_EXIDX, unwind data tables are plain SHT_PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is PREFIX + the text section's name,
     with ".text" itself mapping to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once text sections use the "once" prefix, with the
	 link-once infix stripped from the suffix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  The unwind section joins the same group as
     its text section.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22511
22512
22513 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22514 personality routine data. Returns zero, or the index table value for
22515 an inline entry. */
22516
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  /* Emit any deferred stack adjustment / frame pointer restore.  */
  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the unwind data section for this text section.  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.
	 Routine 0 can pack at most 3 opcode bytes inline.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: build the
		 word 0x80 | up to three opcode bytes and return it for
		 the caller to place in the index entry.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round SIZE up to whole words; it must fit in the one-byte count
     field of the table entry.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Word-align and remember where this table entry starts.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  /* Build the first word of the entry; N tracks how many opcode bytes
     still fit into the current word DATA.  */
  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine: a PREL31 reference to it occupies
	 the first word.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full: flush it and start a new one.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22681
22682
22683 /* Initialize the DWARF-2 unwind information for this procedure. */
22684
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22690 #endif /* OBJ_ELF */
22691
22692 /* Convert REGNAME to a DWARF-2 register number. */
22693
22694 int
22695 tc_arm_regname_to_dw2regnum (char *regname)
22696 {
22697 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22698 if (reg != FAIL)
22699 return reg;
22700
22701 /* PR 16694: Allow VFP registers as well. */
22702 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22703 if (reg != FAIL)
22704 return 64 + reg;
22705
22706 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22707 if (reg != FAIL)
22708 return reg + 256;
22709
22710 return FAIL;
22711 }
22712
22713 #ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL;
     used by PE targets for DWARF section offsets.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
22724 #endif
22725
22726 /* MD interface: Symbol and relocation handling. */
22727
22728 /* Return the address within the segment that a PC-relative fixup is
22729 relative to. For ARM, PC-relative fixups applied to instructions
22730 are generally relative to the location of the fixup plus 8 bytes.
22731 Thumb branches are offset by 4, and Thumb loads relative to PC
22732 require special handling. */
22733
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Restore the full base (undoing the zeroing above) when the
	 target is a local ARM function and the CPU has v5T, i.e. when
	 interworking will be resolved at assembly time.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22855
22856 static bfd_boolean flag_warn_syms = TRUE;
22857
22858 bfd_boolean
22859 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22860 {
22861 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22862 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22863 does mean that the resulting code might be very confusing to the reader.
22864 Also this warning can be triggered if the user omits an operand before
22865 an immediate address, eg:
22866
22867 LDR =foo
22868
22869 GAS treats this as an assignment of the value of the symbol foo to a
22870 symbol LDR, and so (without this code) it will not issue any kind of
22871 warning or error message.
22872
22873 Note - ARM instructions are case-insensitive but the strings in the hash
22874 table are all stored in lower case, so we must first ensure that name is
22875 lower case too. */
22876 if (flag_warn_syms && arm_ops_hsh)
22877 {
22878 char * nbuf = strdup (name);
22879 char * p;
22880
22881 for (p = nbuf; *p; p++)
22882 *p = TOLOWER (*p);
22883 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22884 {
22885 static struct hash_control * already_warned = NULL;
22886
22887 if (already_warned == NULL)
22888 already_warned = hash_new ();
22889 /* Only warn about the symbol once. To keep the code
22890 simple we let hash_insert do the lookup for us. */
22891 if (hash_insert (already_warned, name, NULL) == NULL)
22892 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22893 }
22894 else
22895 free (nbuf);
22896 }
22897
22898 return FALSE;
22899 }
22900
22901 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22902 Otherwise we have no need to default values of symbols. */
22903
22904 symbolS *
22905 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22906 {
22907 #ifdef OBJ_ELF
22908 if (name[0] == '_' && name[1] == 'G'
22909 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22910 {
22911 if (!GOT_symbol)
22912 {
22913 if (symbol_find (name))
22914 as_bad (_("GOT already in the symbol table"));
22915
22916 GOT_symbol = symbol_new (name, undefined_section,
22917 (valueT) 0, & zero_address_frag);
22918 }
22919
22920 return GOT_symbol;
22921 }
22922 #endif
22923
22924 return NULL;
22925 }
22926
22927 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22928 computed as two separate immediate values, added together. We
22929 already know that this value cannot be computed by just one ARM
22930 instruction. */
22931
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even left-rotation; the A32 immediate field holds an
     8-bit value rotated right by 2 * (bits 8-11), so i << 7 places
     i / 2 into that rotation field.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The low byte of A becomes the first immediate; the second
	   immediate must be a single byte in one of the three higher
	   byte positions, encoded with a correspondingly larger
	   rotation.  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
22965
22966 static int
22967 validate_offset_imm (unsigned int val, int hwse)
22968 {
22969 if ((hwse && val > 255) || val > 4095)
22970 return FAIL;
22971 return val;
22972 }
22973
22974 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22975 negative immediate constant by altering the instruction. A bit of
22976 a hack really.
22977 MOV <-> MVN
22978 AND <-> BIC
22979 ADC <-> SBC
22980 by inverting the second operand, and
22981 ADD <-> SUB
22982 CMP <-> CMN
22983 by negating the second operand. */
22984
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute the ARM modified-immediate encodings of both -value
     and ~value; each is FAIL if the constant cannot be encoded.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  /* Extract the data-processing opcode and pick its counterpart.  */
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen alternative immediate may itself be unencodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction word.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
23062
23063 /* Like negate_data_op, but for Thumb-2. */
23064
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute the Thumb-2 modified-immediate encodings of -value
     and ~value; each is FAIL if the constant cannot be encoded.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 marks the TST form, which cannot be inverted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen alternative immediate may itself be unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction word.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
23138
23139 /* Read a 32-bit thumb instruction from buf. */
23140
23141 static unsigned long
23142 get_thumb32_insn (char * buf)
23143 {
23144 unsigned long insn;
23145 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23146 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23147
23148 return insn;
23149 }
23150
23151 /* We usually want to set the low bit on the address of thumb function
23152 symbols. In particular .word foo - . should have the low bit set.
23153 Generic code tries to fold the difference of two symbols to
23154 a constant. Prevent this and force a relocation when the first symbols
23155 is a thumb function. */
23156
23157 bfd_boolean
23158 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23159 {
23160 if (op == O_subtract
23161 && l->X_op == O_symbol
23162 && r->X_op == O_symbol
23163 && THUMB_IS_FUNC (l->X_add_symbol))
23164 {
23165 l->X_op = O_subtract;
23166 l->X_op_symbol = r->X_add_symbol;
23167 l->X_add_number -= r->X_add_number;
23168 return TRUE;
23169 }
23170
23171 /* Process as normal. */
23172 return FALSE;
23173 }
23174
23175 /* Encode Thumb2 unconditional branches and calls. The encoding
23176 for the 2 are identical for the immediate values. */
23177
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split VALUE into the T32 branch immediate fields: sign bit S,
     I1/I2, the upper 10 bits (hi) and the lower 11 halfword-aligned
     bits (lo).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The encoding stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S):
     xor with S here, then invert both bits via the final xor with
     T2I1I2MASK.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23199
23200 void
23201 md_apply_fix (fixS * fixP,
23202 valueT * valP,
23203 segT seg)
23204 {
23205 offsetT value = * valP;
23206 offsetT newval;
23207 unsigned int newimm;
23208 unsigned long temp;
23209 int sign;
23210 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23211
23212 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23213
23214 /* Note whether this will delete the relocation. */
23215
23216 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23217 fixP->fx_done = 1;
23218
23219 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23220 consistency with the behaviour on 32-bit hosts. Remember value
23221 for emit_reloc. */
23222 value &= 0xffffffff;
23223 value ^= 0x80000000;
23224 value -= 0x80000000;
23225
23226 *valP = value;
23227 fixP->fx_addnumber = value;
23228
23229 /* Same treatment for fixP->fx_offset. */
23230 fixP->fx_offset &= 0xffffffff;
23231 fixP->fx_offset ^= 0x80000000;
23232 fixP->fx_offset -= 0x80000000;
23233
23234 switch (fixP->fx_r_type)
23235 {
23236 case BFD_RELOC_NONE:
23237 /* This will need to go in the object file. */
23238 fixP->fx_done = 0;
23239 break;
23240
23241 case BFD_RELOC_ARM_IMMEDIATE:
23242 /* We claim that this fixup has been processed here,
23243 even if in fact we generate an error because we do
23244 not have a reloc for it, so tc_gen_reloc will reject it. */
23245 fixP->fx_done = 1;
23246
23247 if (fixP->fx_addsy)
23248 {
23249 const char *msg = 0;
23250
23251 if (! S_IS_DEFINED (fixP->fx_addsy))
23252 msg = _("undefined symbol %s used as an immediate value");
23253 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23254 msg = _("symbol %s is in a different section");
23255 else if (S_IS_WEAK (fixP->fx_addsy))
23256 msg = _("symbol %s is weak and may be overridden later");
23257
23258 if (msg)
23259 {
23260 as_bad_where (fixP->fx_file, fixP->fx_line,
23261 msg, S_GET_NAME (fixP->fx_addsy));
23262 break;
23263 }
23264 }
23265
23266 temp = md_chars_to_number (buf, INSN_SIZE);
23267
23268 /* If the offset is negative, we should use encoding A2 for ADR. */
23269 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23270 newimm = negate_data_op (&temp, value);
23271 else
23272 {
23273 newimm = encode_arm_immediate (value);
23274
23275 /* If the instruction will fail, see if we can fix things up by
23276 changing the opcode. */
23277 if (newimm == (unsigned int) FAIL)
23278 newimm = negate_data_op (&temp, value);
23279 /* MOV accepts both ARM modified immediate (A1 encoding) and
23280 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23281 When disassembling, MOV is preferred when there is no encoding
23282 overlap. */
23283 if (newimm == (unsigned int) FAIL
23284 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23285 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23286 && !((temp >> SBIT_SHIFT) & 0x1)
23287 && value >= 0 && value <= 0xffff)
23288 {
23289 /* Clear bits[23:20] to change encoding from A1 to A2. */
23290 temp &= 0xff0fffff;
23291 /* Encoding high 4bits imm. Code below will encode the remaining
23292 low 12bits. */
23293 temp |= (value & 0x0000f000) << 4;
23294 newimm = value & 0x00000fff;
23295 }
23296 }
23297
23298 if (newimm == (unsigned int) FAIL)
23299 {
23300 as_bad_where (fixP->fx_file, fixP->fx_line,
23301 _("invalid constant (%lx) after fixup"),
23302 (unsigned long) value);
23303 break;
23304 }
23305
23306 newimm |= (temp & 0xfffff000);
23307 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23308 break;
23309
23310 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23311 {
23312 unsigned int highpart = 0;
23313 unsigned int newinsn = 0xe1a00000; /* nop. */
23314
23315 if (fixP->fx_addsy)
23316 {
23317 const char *msg = 0;
23318
23319 if (! S_IS_DEFINED (fixP->fx_addsy))
23320 msg = _("undefined symbol %s used as an immediate value");
23321 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23322 msg = _("symbol %s is in a different section");
23323 else if (S_IS_WEAK (fixP->fx_addsy))
23324 msg = _("symbol %s is weak and may be overridden later");
23325
23326 if (msg)
23327 {
23328 as_bad_where (fixP->fx_file, fixP->fx_line,
23329 msg, S_GET_NAME (fixP->fx_addsy));
23330 break;
23331 }
23332 }
23333
23334 newimm = encode_arm_immediate (value);
23335 temp = md_chars_to_number (buf, INSN_SIZE);
23336
23337 /* If the instruction will fail, see if we can fix things up by
23338 changing the opcode. */
23339 if (newimm == (unsigned int) FAIL
23340 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23341 {
23342 /* No ? OK - try using two ADD instructions to generate
23343 the value. */
23344 newimm = validate_immediate_twopart (value, & highpart);
23345
23346 /* Yes - then make sure that the second instruction is
23347 also an add. */
23348 if (newimm != (unsigned int) FAIL)
23349 newinsn = temp;
23350 /* Still No ? Try using a negated value. */
23351 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23352 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23353 /* Otherwise - give up. */
23354 else
23355 {
23356 as_bad_where (fixP->fx_file, fixP->fx_line,
23357 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23358 (long) value);
23359 break;
23360 }
23361
23362 /* Replace the first operand in the 2nd instruction (which
23363 is the PC) with the destination register. We have
23364 already added in the PC in the first instruction and we
23365 do not want to do it again. */
23366 newinsn &= ~ 0xf0000;
23367 newinsn |= ((newinsn & 0x0f000) << 4);
23368 }
23369
23370 newimm |= (temp & 0xfffff000);
23371 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23372
23373 highpart |= (newinsn & 0xfffff000);
23374 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23375 }
23376 break;
23377
23378 case BFD_RELOC_ARM_OFFSET_IMM:
23379 if (!fixP->fx_done && seg->use_rela_p)
23380 value = 0;
23381 /* Fall through. */
23382
23383 case BFD_RELOC_ARM_LITERAL:
23384 sign = value > 0;
23385
23386 if (value < 0)
23387 value = - value;
23388
23389 if (validate_offset_imm (value, 0) == FAIL)
23390 {
23391 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23392 as_bad_where (fixP->fx_file, fixP->fx_line,
23393 _("invalid literal constant: pool needs to be closer"));
23394 else
23395 as_bad_where (fixP->fx_file, fixP->fx_line,
23396 _("bad immediate value for offset (%ld)"),
23397 (long) value);
23398 break;
23399 }
23400
23401 newval = md_chars_to_number (buf, INSN_SIZE);
23402 if (value == 0)
23403 newval &= 0xfffff000;
23404 else
23405 {
23406 newval &= 0xff7ff000;
23407 newval |= value | (sign ? INDEX_UP : 0);
23408 }
23409 md_number_to_chars (buf, newval, INSN_SIZE);
23410 break;
23411
23412 case BFD_RELOC_ARM_OFFSET_IMM8:
23413 case BFD_RELOC_ARM_HWLITERAL:
23414 sign = value > 0;
23415
23416 if (value < 0)
23417 value = - value;
23418
23419 if (validate_offset_imm (value, 1) == FAIL)
23420 {
23421 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23422 as_bad_where (fixP->fx_file, fixP->fx_line,
23423 _("invalid literal constant: pool needs to be closer"));
23424 else
23425 as_bad_where (fixP->fx_file, fixP->fx_line,
23426 _("bad immediate value for 8-bit offset (%ld)"),
23427 (long) value);
23428 break;
23429 }
23430
23431 newval = md_chars_to_number (buf, INSN_SIZE);
23432 if (value == 0)
23433 newval &= 0xfffff0f0;
23434 else
23435 {
23436 newval &= 0xff7ff0f0;
23437 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23438 }
23439 md_number_to_chars (buf, newval, INSN_SIZE);
23440 break;
23441
23442 case BFD_RELOC_ARM_T32_OFFSET_U8:
23443 if (value < 0 || value > 1020 || value % 4 != 0)
23444 as_bad_where (fixP->fx_file, fixP->fx_line,
23445 _("bad immediate value for offset (%ld)"), (long) value);
23446 value /= 4;
23447
23448 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23449 newval |= value;
23450 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23451 break;
23452
23453 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23454 /* This is a complicated relocation used for all varieties of Thumb32
23455 load/store instruction with immediate offset:
23456
23457 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23458 *4, optional writeback(W)
23459 (doubleword load/store)
23460
23461 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23462 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23463 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23464 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23465 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23466
23467 Uppercase letters indicate bits that are already encoded at
23468 this point. Lowercase letters are our problem. For the
23469 second block of instructions, the secondary opcode nybble
23470 (bits 8..11) is present, and bit 23 is zero, even if this is
23471 a PC-relative operation. */
23472 newval = md_chars_to_number (buf, THUMB_SIZE);
23473 newval <<= 16;
23474 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23475
23476 if ((newval & 0xf0000000) == 0xe0000000)
23477 {
23478 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23479 if (value >= 0)
23480 newval |= (1 << 23);
23481 else
23482 value = -value;
23483 if (value % 4 != 0)
23484 {
23485 as_bad_where (fixP->fx_file, fixP->fx_line,
23486 _("offset not a multiple of 4"));
23487 break;
23488 }
23489 value /= 4;
23490 if (value > 0xff)
23491 {
23492 as_bad_where (fixP->fx_file, fixP->fx_line,
23493 _("offset out of range"));
23494 break;
23495 }
23496 newval &= ~0xff;
23497 }
23498 else if ((newval & 0x000f0000) == 0x000f0000)
23499 {
23500 /* PC-relative, 12-bit offset. */
23501 if (value >= 0)
23502 newval |= (1 << 23);
23503 else
23504 value = -value;
23505 if (value > 0xfff)
23506 {
23507 as_bad_where (fixP->fx_file, fixP->fx_line,
23508 _("offset out of range"));
23509 break;
23510 }
23511 newval &= ~0xfff;
23512 }
23513 else if ((newval & 0x00000100) == 0x00000100)
23514 {
23515 /* Writeback: 8-bit, +/- offset. */
23516 if (value >= 0)
23517 newval |= (1 << 9);
23518 else
23519 value = -value;
23520 if (value > 0xff)
23521 {
23522 as_bad_where (fixP->fx_file, fixP->fx_line,
23523 _("offset out of range"));
23524 break;
23525 }
23526 newval &= ~0xff;
23527 }
23528 else if ((newval & 0x00000f00) == 0x00000e00)
23529 {
23530 /* T-instruction: positive 8-bit offset. */
23531 if (value < 0 || value > 0xff)
23532 {
23533 as_bad_where (fixP->fx_file, fixP->fx_line,
23534 _("offset out of range"));
23535 break;
23536 }
23537 newval &= ~0xff;
23538 newval |= value;
23539 }
23540 else
23541 {
23542 /* Positive 12-bit or negative 8-bit offset. */
23543 int limit;
23544 if (value >= 0)
23545 {
23546 newval |= (1 << 23);
23547 limit = 0xfff;
23548 }
23549 else
23550 {
23551 value = -value;
23552 limit = 0xff;
23553 }
23554 if (value > limit)
23555 {
23556 as_bad_where (fixP->fx_file, fixP->fx_line,
23557 _("offset out of range"));
23558 break;
23559 }
23560 newval &= ~limit;
23561 }
23562
23563 newval |= value;
23564 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23565 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23566 break;
23567
23568 case BFD_RELOC_ARM_SHIFT_IMM:
23569 newval = md_chars_to_number (buf, INSN_SIZE);
23570 if (((unsigned long) value) > 32
23571 || (value == 32
23572 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23573 {
23574 as_bad_where (fixP->fx_file, fixP->fx_line,
23575 _("shift expression is too large"));
23576 break;
23577 }
23578
23579 if (value == 0)
23580 /* Shifts of zero must be done as lsl. */
23581 newval &= ~0x60;
23582 else if (value == 32)
23583 value = 0;
23584 newval &= 0xfffff07f;
23585 newval |= (value & 0x1f) << 7;
23586 md_number_to_chars (buf, newval, INSN_SIZE);
23587 break;
23588
23589 case BFD_RELOC_ARM_T32_IMMEDIATE:
23590 case BFD_RELOC_ARM_T32_ADD_IMM:
23591 case BFD_RELOC_ARM_T32_IMM12:
23592 case BFD_RELOC_ARM_T32_ADD_PC12:
23593 /* We claim that this fixup has been processed here,
23594 even if in fact we generate an error because we do
23595 not have a reloc for it, so tc_gen_reloc will reject it. */
23596 fixP->fx_done = 1;
23597
23598 if (fixP->fx_addsy
23599 && ! S_IS_DEFINED (fixP->fx_addsy))
23600 {
23601 as_bad_where (fixP->fx_file, fixP->fx_line,
23602 _("undefined symbol %s used as an immediate value"),
23603 S_GET_NAME (fixP->fx_addsy));
23604 break;
23605 }
23606
23607 newval = md_chars_to_number (buf, THUMB_SIZE);
23608 newval <<= 16;
23609 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23610
23611 newimm = FAIL;
23612 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23613 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23614 Thumb2 modified immediate encoding (T2). */
23615 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23616 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23617 {
23618 newimm = encode_thumb32_immediate (value);
23619 if (newimm == (unsigned int) FAIL)
23620 newimm = thumb32_negate_data_op (&newval, value);
23621 }
23622 if (newimm == (unsigned int) FAIL)
23623 {
23624 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23625 {
23626 /* Turn add/sum into addw/subw. */
23627 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23628 newval = (newval & 0xfeffffff) | 0x02000000;
23629 /* No flat 12-bit imm encoding for addsw/subsw. */
23630 if ((newval & 0x00100000) == 0)
23631 {
23632 /* 12 bit immediate for addw/subw. */
23633 if (value < 0)
23634 {
23635 value = -value;
23636 newval ^= 0x00a00000;
23637 }
23638 if (value > 0xfff)
23639 newimm = (unsigned int) FAIL;
23640 else
23641 newimm = value;
23642 }
23643 }
23644 else
23645 {
23646 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23647 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23648 disassembling, MOV is preferred when there is no encoding
23649 overlap. */
23650 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23651 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
23652 but with the Rn field [19:16] set to 1111. */
23653 && (((newval >> 16) & 0xf) == 0xf)
23654 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23655 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23656 && value >= 0 && value <= 0xffff)
23657 {
23658 /* Toggle bit[25] to change encoding from T2 to T3. */
23659 newval ^= 1 << 25;
23660 /* Clear bits[19:16]. */
23661 newval &= 0xfff0ffff;
23662 /* Encoding high 4bits imm. Code below will encode the
23663 remaining low 12bits. */
23664 newval |= (value & 0x0000f000) << 4;
23665 newimm = value & 0x00000fff;
23666 }
23667 }
23668 }
23669
23670 if (newimm == (unsigned int)FAIL)
23671 {
23672 as_bad_where (fixP->fx_file, fixP->fx_line,
23673 _("invalid constant (%lx) after fixup"),
23674 (unsigned long) value);
23675 break;
23676 }
23677
23678 newval |= (newimm & 0x800) << 15;
23679 newval |= (newimm & 0x700) << 4;
23680 newval |= (newimm & 0x0ff);
23681
23682 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23683 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23684 break;
23685
23686 case BFD_RELOC_ARM_SMC:
23687 if (((unsigned long) value) > 0xffff)
23688 as_bad_where (fixP->fx_file, fixP->fx_line,
23689 _("invalid smc expression"));
23690 newval = md_chars_to_number (buf, INSN_SIZE);
23691 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23692 md_number_to_chars (buf, newval, INSN_SIZE);
23693 break;
23694
23695 case BFD_RELOC_ARM_HVC:
23696 if (((unsigned long) value) > 0xffff)
23697 as_bad_where (fixP->fx_file, fixP->fx_line,
23698 _("invalid hvc expression"));
23699 newval = md_chars_to_number (buf, INSN_SIZE);
23700 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23701 md_number_to_chars (buf, newval, INSN_SIZE);
23702 break;
23703
23704 case BFD_RELOC_ARM_SWI:
23705 if (fixP->tc_fix_data != 0)
23706 {
23707 if (((unsigned long) value) > 0xff)
23708 as_bad_where (fixP->fx_file, fixP->fx_line,
23709 _("invalid swi expression"));
23710 newval = md_chars_to_number (buf, THUMB_SIZE);
23711 newval |= value;
23712 md_number_to_chars (buf, newval, THUMB_SIZE);
23713 }
23714 else
23715 {
23716 if (((unsigned long) value) > 0x00ffffff)
23717 as_bad_where (fixP->fx_file, fixP->fx_line,
23718 _("invalid swi expression"));
23719 newval = md_chars_to_number (buf, INSN_SIZE);
23720 newval |= value;
23721 md_number_to_chars (buf, newval, INSN_SIZE);
23722 }
23723 break;
23724
23725 case BFD_RELOC_ARM_MULTI:
23726 if (((unsigned long) value) > 0xffff)
23727 as_bad_where (fixP->fx_file, fixP->fx_line,
23728 _("invalid expression in load/store multiple"));
23729 newval = value | md_chars_to_number (buf, INSN_SIZE);
23730 md_number_to_chars (buf, newval, INSN_SIZE);
23731 break;
23732
23733 #ifdef OBJ_ELF
23734 case BFD_RELOC_ARM_PCREL_CALL:
23735
23736 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23737 && fixP->fx_addsy
23738 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23739 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23740 && THUMB_IS_FUNC (fixP->fx_addsy))
23741 /* Flip the bl to blx. This is a simple flip
23742 bit here because we generate PCREL_CALL for
23743 unconditional bls. */
23744 {
23745 newval = md_chars_to_number (buf, INSN_SIZE);
23746 newval = newval | 0x10000000;
23747 md_number_to_chars (buf, newval, INSN_SIZE);
23748 temp = 1;
23749 fixP->fx_done = 1;
23750 }
23751 else
23752 temp = 3;
23753 goto arm_branch_common;
23754
23755 case BFD_RELOC_ARM_PCREL_JUMP:
23756 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23757 && fixP->fx_addsy
23758 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23759 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23760 && THUMB_IS_FUNC (fixP->fx_addsy))
23761 {
23762 /* This would map to a bl<cond>, b<cond>,
23763 b<always> to a Thumb function. We
23764 need to force a relocation for this particular
23765 case. */
23766 newval = md_chars_to_number (buf, INSN_SIZE);
23767 fixP->fx_done = 0;
23768 }
23769 /* Fall through. */
23770
23771 case BFD_RELOC_ARM_PLT32:
23772 #endif
23773 case BFD_RELOC_ARM_PCREL_BRANCH:
23774 temp = 3;
23775 goto arm_branch_common;
23776
23777 case BFD_RELOC_ARM_PCREL_BLX:
23778
23779 temp = 1;
23780 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23781 && fixP->fx_addsy
23782 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23783 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23784 && ARM_IS_FUNC (fixP->fx_addsy))
23785 {
23786 /* Flip the blx to a bl and warn. */
23787 const char *name = S_GET_NAME (fixP->fx_addsy);
23788 newval = 0xeb000000;
23789 as_warn_where (fixP->fx_file, fixP->fx_line,
23790 _("blx to '%s' an ARM ISA state function changed to bl"),
23791 name);
23792 md_number_to_chars (buf, newval, INSN_SIZE);
23793 temp = 3;
23794 fixP->fx_done = 1;
23795 }
23796
23797 #ifdef OBJ_ELF
23798 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23799 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23800 #endif
23801
23802 arm_branch_common:
23803 /* We are going to store value (shifted right by two) in the
23804 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23805 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23806 also be clear. */
23807 if (value & temp)
23808 as_bad_where (fixP->fx_file, fixP->fx_line,
23809 _("misaligned branch destination"));
23810 if ((value & (offsetT)0xfe000000) != (offsetT)0
23811 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23812 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23813
23814 if (fixP->fx_done || !seg->use_rela_p)
23815 {
23816 newval = md_chars_to_number (buf, INSN_SIZE);
23817 newval |= (value >> 2) & 0x00ffffff;
23818 /* Set the H bit on BLX instructions. */
23819 if (temp == 1)
23820 {
23821 if (value & 2)
23822 newval |= 0x01000000;
23823 else
23824 newval &= ~0x01000000;
23825 }
23826 md_number_to_chars (buf, newval, INSN_SIZE);
23827 }
23828 break;
23829
23830 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23831 /* CBZ can only branch forward. */
23832
23833 /* Attempts to use CBZ to branch to the next instruction
23834 (which, strictly speaking, are prohibited) will be turned into
23835 no-ops.
23836
23837 FIXME: It may be better to remove the instruction completely and
23838 perform relaxation. */
23839 if (value == -2)
23840 {
23841 newval = md_chars_to_number (buf, THUMB_SIZE);
23842 newval = 0xbf00; /* NOP encoding T1 */
23843 md_number_to_chars (buf, newval, THUMB_SIZE);
23844 }
23845 else
23846 {
23847 if (value & ~0x7e)
23848 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23849
23850 if (fixP->fx_done || !seg->use_rela_p)
23851 {
23852 newval = md_chars_to_number (buf, THUMB_SIZE);
23853 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23854 md_number_to_chars (buf, newval, THUMB_SIZE);
23855 }
23856 }
23857 break;
23858
23859 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23860 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23861 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23862
23863 if (fixP->fx_done || !seg->use_rela_p)
23864 {
23865 newval = md_chars_to_number (buf, THUMB_SIZE);
23866 newval |= (value & 0x1ff) >> 1;
23867 md_number_to_chars (buf, newval, THUMB_SIZE);
23868 }
23869 break;
23870
23871 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23872 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23873 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23874
23875 if (fixP->fx_done || !seg->use_rela_p)
23876 {
23877 newval = md_chars_to_number (buf, THUMB_SIZE);
23878 newval |= (value & 0xfff) >> 1;
23879 md_number_to_chars (buf, newval, THUMB_SIZE);
23880 }
23881 break;
23882
23883 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23884 if (fixP->fx_addsy
23885 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23886 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23887 && ARM_IS_FUNC (fixP->fx_addsy)
23888 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23889 {
23890 /* Force a relocation for a branch 20 bits wide. */
23891 fixP->fx_done = 0;
23892 }
23893 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23894 as_bad_where (fixP->fx_file, fixP->fx_line,
23895 _("conditional branch out of range"));
23896
23897 if (fixP->fx_done || !seg->use_rela_p)
23898 {
23899 offsetT newval2;
23900 addressT S, J1, J2, lo, hi;
23901
23902 S = (value & 0x00100000) >> 20;
23903 J2 = (value & 0x00080000) >> 19;
23904 J1 = (value & 0x00040000) >> 18;
23905 hi = (value & 0x0003f000) >> 12;
23906 lo = (value & 0x00000ffe) >> 1;
23907
23908 newval = md_chars_to_number (buf, THUMB_SIZE);
23909 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23910 newval |= (S << 10) | hi;
23911 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23912 md_number_to_chars (buf, newval, THUMB_SIZE);
23913 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23914 }
23915 break;
23916
23917 case BFD_RELOC_THUMB_PCREL_BLX:
23918 /* If there is a blx from a thumb state function to
23919 another thumb function flip this to a bl and warn
23920 about it. */
23921
23922 if (fixP->fx_addsy
23923 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23924 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23925 && THUMB_IS_FUNC (fixP->fx_addsy))
23926 {
23927 const char *name = S_GET_NAME (fixP->fx_addsy);
23928 as_warn_where (fixP->fx_file, fixP->fx_line,
23929 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23930 name);
23931 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23932 newval = newval | 0x1000;
23933 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23934 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23935 fixP->fx_done = 1;
23936 }
23937
23938
23939 goto thumb_bl_common;
23940
23941 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23942 /* A bl from Thumb state ISA to an internal ARM state function
23943 is converted to a blx. */
23944 if (fixP->fx_addsy
23945 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23946 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23947 && ARM_IS_FUNC (fixP->fx_addsy)
23948 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23949 {
23950 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23951 newval = newval & ~0x1000;
23952 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23953 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23954 fixP->fx_done = 1;
23955 }
23956
23957 thumb_bl_common:
23958
23959 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23960 /* For a BLX instruction, make sure that the relocation is rounded up
23961 to a word boundary. This follows the semantics of the instruction
23962 which specifies that bit 1 of the target address will come from bit
23963 1 of the base address. */
23964 value = (value + 3) & ~ 3;
23965
23966 #ifdef OBJ_ELF
23967 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23968 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23969 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23970 #endif
23971
23972 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23973 {
23974 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23975 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23976 else if ((value & ~0x1ffffff)
23977 && ((value & ~0x1ffffff) != ~0x1ffffff))
23978 as_bad_where (fixP->fx_file, fixP->fx_line,
23979 _("Thumb2 branch out of range"));
23980 }
23981
23982 if (fixP->fx_done || !seg->use_rela_p)
23983 encode_thumb2_b_bl_offset (buf, value);
23984
23985 break;
23986
23987 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23988 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23989 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23990
23991 if (fixP->fx_done || !seg->use_rela_p)
23992 encode_thumb2_b_bl_offset (buf, value);
23993
23994 break;
23995
23996 case BFD_RELOC_8:
23997 if (fixP->fx_done || !seg->use_rela_p)
23998 *buf = value;
23999 break;
24000
24001 case BFD_RELOC_16:
24002 if (fixP->fx_done || !seg->use_rela_p)
24003 md_number_to_chars (buf, value, 2);
24004 break;
24005
24006 #ifdef OBJ_ELF
24007 case BFD_RELOC_ARM_TLS_CALL:
24008 case BFD_RELOC_ARM_THM_TLS_CALL:
24009 case BFD_RELOC_ARM_TLS_DESCSEQ:
24010 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24011 case BFD_RELOC_ARM_TLS_GOTDESC:
24012 case BFD_RELOC_ARM_TLS_GD32:
24013 case BFD_RELOC_ARM_TLS_LE32:
24014 case BFD_RELOC_ARM_TLS_IE32:
24015 case BFD_RELOC_ARM_TLS_LDM32:
24016 case BFD_RELOC_ARM_TLS_LDO32:
24017 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24018 break;
24019
24020 /* Same handling as above, but with the arm_fdpic guard. */
24021 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24022 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24023 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24024 if (arm_fdpic)
24025 {
24026 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24027 }
24028 else
24029 {
24030 as_bad_where (fixP->fx_file, fixP->fx_line,
24031 _("Relocation supported only in FDPIC mode"));
24032 }
24033 break;
24034
24035 case BFD_RELOC_ARM_GOT32:
24036 case BFD_RELOC_ARM_GOTOFF:
24037 break;
24038
24039 case BFD_RELOC_ARM_GOT_PREL:
24040 if (fixP->fx_done || !seg->use_rela_p)
24041 md_number_to_chars (buf, value, 4);
24042 break;
24043
24044 case BFD_RELOC_ARM_TARGET2:
24045 /* TARGET2 is not partial-inplace, so we need to write the
24046 addend here for REL targets, because it won't be written out
24047 during reloc processing later. */
24048 if (fixP->fx_done || !seg->use_rela_p)
24049 md_number_to_chars (buf, fixP->fx_offset, 4);
24050 break;
24051
24052 /* Relocations for FDPIC. */
24053 case BFD_RELOC_ARM_GOTFUNCDESC:
24054 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24055 case BFD_RELOC_ARM_FUNCDESC:
24056 if (arm_fdpic)
24057 {
24058 if (fixP->fx_done || !seg->use_rela_p)
24059 md_number_to_chars (buf, 0, 4);
24060 }
24061 else
24062 {
24063 as_bad_where (fixP->fx_file, fixP->fx_line,
24064 _("Relocation supported only in FDPIC mode"));
24065 }
24066 break;
24067 #endif
24068
24069 case BFD_RELOC_RVA:
24070 case BFD_RELOC_32:
24071 case BFD_RELOC_ARM_TARGET1:
24072 case BFD_RELOC_ARM_ROSEGREL32:
24073 case BFD_RELOC_ARM_SBREL32:
24074 case BFD_RELOC_32_PCREL:
24075 #ifdef TE_PE
24076 case BFD_RELOC_32_SECREL:
24077 #endif
24078 if (fixP->fx_done || !seg->use_rela_p)
24079 #ifdef TE_WINCE
24080 /* For WinCE we only do this for pcrel fixups. */
24081 if (fixP->fx_done || fixP->fx_pcrel)
24082 #endif
24083 md_number_to_chars (buf, value, 4);
24084 break;
24085
24086 #ifdef OBJ_ELF
24087 case BFD_RELOC_ARM_PREL31:
24088 if (fixP->fx_done || !seg->use_rela_p)
24089 {
24090 newval = md_chars_to_number (buf, 4) & 0x80000000;
24091 if ((value ^ (value >> 1)) & 0x40000000)
24092 {
24093 as_bad_where (fixP->fx_file, fixP->fx_line,
24094 _("rel31 relocation overflow"));
24095 }
24096 newval |= value & 0x7fffffff;
24097 md_number_to_chars (buf, newval, 4);
24098 }
24099 break;
24100 #endif
24101
24102 case BFD_RELOC_ARM_CP_OFF_IMM:
24103 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24104 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24105 newval = md_chars_to_number (buf, INSN_SIZE);
24106 else
24107 newval = get_thumb32_insn (buf);
24108 if ((newval & 0x0f200f00) == 0x0d000900)
24109 {
24110 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24111 has permitted values that are multiples of 2, in the range 0
24112 to 510. */
24113 if (value < -510 || value > 510 || (value & 1))
24114 as_bad_where (fixP->fx_file, fixP->fx_line,
24115 _("co-processor offset out of range"));
24116 }
24117 else if (value < -1023 || value > 1023 || (value & 3))
24118 as_bad_where (fixP->fx_file, fixP->fx_line,
24119 _("co-processor offset out of range"));
24120 cp_off_common:
24121 sign = value > 0;
24122 if (value < 0)
24123 value = -value;
24124 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24125 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24126 newval = md_chars_to_number (buf, INSN_SIZE);
24127 else
24128 newval = get_thumb32_insn (buf);
24129 if (value == 0)
24130 newval &= 0xffffff00;
24131 else
24132 {
24133 newval &= 0xff7fff00;
24134 if ((newval & 0x0f200f00) == 0x0d000900)
24135 {
24136 /* This is a fp16 vstr/vldr.
24137
24138 It requires the immediate offset in the instruction is shifted
24139 left by 1 to be a half-word offset.
24140
24141 Here, left shift by 1 first, and later right shift by 2
24142 should get the right offset. */
24143 value <<= 1;
24144 }
24145 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24146 }
24147 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24148 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24149 md_number_to_chars (buf, newval, INSN_SIZE);
24150 else
24151 put_thumb32_insn (buf, newval);
24152 break;
24153
24154 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24155 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24156 if (value < -255 || value > 255)
24157 as_bad_where (fixP->fx_file, fixP->fx_line,
24158 _("co-processor offset out of range"));
24159 value *= 4;
24160 goto cp_off_common;
24161
24162 case BFD_RELOC_ARM_THUMB_OFFSET:
24163 newval = md_chars_to_number (buf, THUMB_SIZE);
24164 /* Exactly what ranges, and where the offset is inserted depends
24165 on the type of instruction, we can establish this from the
24166 top 4 bits. */
24167 switch (newval >> 12)
24168 {
24169 case 4: /* PC load. */
24170 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24171 forced to zero for these loads; md_pcrel_from has already
24172 compensated for this. */
24173 if (value & 3)
24174 as_bad_where (fixP->fx_file, fixP->fx_line,
24175 _("invalid offset, target not word aligned (0x%08lX)"),
24176 (((unsigned long) fixP->fx_frag->fr_address
24177 + (unsigned long) fixP->fx_where) & ~3)
24178 + (unsigned long) value);
24179
24180 if (value & ~0x3fc)
24181 as_bad_where (fixP->fx_file, fixP->fx_line,
24182 _("invalid offset, value too big (0x%08lX)"),
24183 (long) value);
24184
24185 newval |= value >> 2;
24186 break;
24187
24188 case 9: /* SP load/store. */
24189 if (value & ~0x3fc)
24190 as_bad_where (fixP->fx_file, fixP->fx_line,
24191 _("invalid offset, value too big (0x%08lX)"),
24192 (long) value);
24193 newval |= value >> 2;
24194 break;
24195
24196 case 6: /* Word load/store. */
24197 if (value & ~0x7c)
24198 as_bad_where (fixP->fx_file, fixP->fx_line,
24199 _("invalid offset, value too big (0x%08lX)"),
24200 (long) value);
24201 newval |= value << 4; /* 6 - 2. */
24202 break;
24203
24204 case 7: /* Byte load/store. */
24205 if (value & ~0x1f)
24206 as_bad_where (fixP->fx_file, fixP->fx_line,
24207 _("invalid offset, value too big (0x%08lX)"),
24208 (long) value);
24209 newval |= value << 6;
24210 break;
24211
24212 case 8: /* Halfword load/store. */
24213 if (value & ~0x3e)
24214 as_bad_where (fixP->fx_file, fixP->fx_line,
24215 _("invalid offset, value too big (0x%08lX)"),
24216 (long) value);
24217 newval |= value << 5; /* 6 - 1. */
24218 break;
24219
24220 default:
24221 as_bad_where (fixP->fx_file, fixP->fx_line,
24222 "Unable to process relocation for thumb opcode: %lx",
24223 (unsigned long) newval);
24224 break;
24225 }
24226 md_number_to_chars (buf, newval, THUMB_SIZE);
24227 break;
24228
24229 case BFD_RELOC_ARM_THUMB_ADD:
24230 /* This is a complicated relocation, since we use it for all of
24231 the following immediate relocations:
24232
24233 3bit ADD/SUB
24234 8bit ADD/SUB
24235 9bit ADD/SUB SP word-aligned
24236 10bit ADD PC/SP word-aligned
24237
24238 The type of instruction being processed is encoded in the
24239 instruction field:
24240
24241 0x8000 SUB
24242 0x00F0 Rd
24243 0x000F Rs
24244 */
24245 newval = md_chars_to_number (buf, THUMB_SIZE);
24246 {
24247 int rd = (newval >> 4) & 0xf;
24248 int rs = newval & 0xf;
24249 int subtract = !!(newval & 0x8000);
24250
24251 /* Check for HI regs, only very restricted cases allowed:
24252 Adjusting SP, and using PC or SP to get an address. */
24253 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24254 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24255 as_bad_where (fixP->fx_file, fixP->fx_line,
24256 _("invalid Hi register with immediate"));
24257
24258 /* If value is negative, choose the opposite instruction. */
24259 if (value < 0)
24260 {
24261 value = -value;
24262 subtract = !subtract;
24263 if (value < 0)
24264 as_bad_where (fixP->fx_file, fixP->fx_line,
24265 _("immediate value out of range"));
24266 }
24267
24268 if (rd == REG_SP)
24269 {
24270 if (value & ~0x1fc)
24271 as_bad_where (fixP->fx_file, fixP->fx_line,
24272 _("invalid immediate for stack address calculation"));
24273 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24274 newval |= value >> 2;
24275 }
24276 else if (rs == REG_PC || rs == REG_SP)
24277 {
24278 /* PR gas/18541. If the addition is for a defined symbol
24279 within range of an ADR instruction then accept it. */
24280 if (subtract
24281 && value == 4
24282 && fixP->fx_addsy != NULL)
24283 {
24284 subtract = 0;
24285
24286 if (! S_IS_DEFINED (fixP->fx_addsy)
24287 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24288 || S_IS_WEAK (fixP->fx_addsy))
24289 {
24290 as_bad_where (fixP->fx_file, fixP->fx_line,
24291 _("address calculation needs a strongly defined nearby symbol"));
24292 }
24293 else
24294 {
24295 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24296
24297 /* Round up to the next 4-byte boundary. */
24298 if (v & 3)
24299 v = (v + 3) & ~ 3;
24300 else
24301 v += 4;
24302 v = S_GET_VALUE (fixP->fx_addsy) - v;
24303
24304 if (v & ~0x3fc)
24305 {
24306 as_bad_where (fixP->fx_file, fixP->fx_line,
24307 _("symbol too far away"));
24308 }
24309 else
24310 {
24311 fixP->fx_done = 1;
24312 value = v;
24313 }
24314 }
24315 }
24316
24317 if (subtract || value & ~0x3fc)
24318 as_bad_where (fixP->fx_file, fixP->fx_line,
24319 _("invalid immediate for address calculation (value = 0x%08lX)"),
24320 (unsigned long) (subtract ? - value : value));
24321 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24322 newval |= rd << 8;
24323 newval |= value >> 2;
24324 }
24325 else if (rs == rd)
24326 {
24327 if (value & ~0xff)
24328 as_bad_where (fixP->fx_file, fixP->fx_line,
24329 _("immediate value out of range"));
24330 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24331 newval |= (rd << 8) | value;
24332 }
24333 else
24334 {
24335 if (value & ~0x7)
24336 as_bad_where (fixP->fx_file, fixP->fx_line,
24337 _("immediate value out of range"));
24338 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24339 newval |= rd | (rs << 3) | (value << 6);
24340 }
24341 }
24342 md_number_to_chars (buf, newval, THUMB_SIZE);
24343 break;
24344
24345 case BFD_RELOC_ARM_THUMB_IMM:
24346 newval = md_chars_to_number (buf, THUMB_SIZE);
24347 if (value < 0 || value > 255)
24348 as_bad_where (fixP->fx_file, fixP->fx_line,
24349 _("invalid immediate: %ld is out of range"),
24350 (long) value);
24351 newval |= value;
24352 md_number_to_chars (buf, newval, THUMB_SIZE);
24353 break;
24354
24355 case BFD_RELOC_ARM_THUMB_SHIFT:
24356 /* 5bit shift value (0..32). LSL cannot take 32. */
24357 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24358 temp = newval & 0xf800;
24359 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24360 as_bad_where (fixP->fx_file, fixP->fx_line,
24361 _("invalid shift value: %ld"), (long) value);
24362 /* Shifts of zero must be encoded as LSL. */
24363 if (value == 0)
24364 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24365 /* Shifts of 32 are encoded as zero. */
24366 else if (value == 32)
24367 value = 0;
24368 newval |= value << 6;
24369 md_number_to_chars (buf, newval, THUMB_SIZE);
24370 break;
24371
24372 case BFD_RELOC_VTABLE_INHERIT:
24373 case BFD_RELOC_VTABLE_ENTRY:
24374 fixP->fx_done = 0;
24375 return;
24376
24377 case BFD_RELOC_ARM_MOVW:
24378 case BFD_RELOC_ARM_MOVT:
24379 case BFD_RELOC_ARM_THUMB_MOVW:
24380 case BFD_RELOC_ARM_THUMB_MOVT:
24381 if (fixP->fx_done || !seg->use_rela_p)
24382 {
24383 /* REL format relocations are limited to a 16-bit addend. */
24384 if (!fixP->fx_done)
24385 {
24386 if (value < -0x8000 || value > 0x7fff)
24387 as_bad_where (fixP->fx_file, fixP->fx_line,
24388 _("offset out of range"));
24389 }
24390 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24391 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24392 {
24393 value >>= 16;
24394 }
24395
24396 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24397 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24398 {
24399 newval = get_thumb32_insn (buf);
24400 newval &= 0xfbf08f00;
24401 newval |= (value & 0xf000) << 4;
24402 newval |= (value & 0x0800) << 15;
24403 newval |= (value & 0x0700) << 4;
24404 newval |= (value & 0x00ff);
24405 put_thumb32_insn (buf, newval);
24406 }
24407 else
24408 {
24409 newval = md_chars_to_number (buf, 4);
24410 newval &= 0xfff0f000;
24411 newval |= value & 0x0fff;
24412 newval |= (value & 0xf000) << 4;
24413 md_number_to_chars (buf, newval, 4);
24414 }
24415 }
24416 return;
24417
24418 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24419 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24420 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24421 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24422 gas_assert (!fixP->fx_done);
24423 {
24424 bfd_vma insn;
24425 bfd_boolean is_mov;
24426 bfd_vma encoded_addend = value;
24427
24428 /* Check that addend can be encoded in instruction. */
24429 if (!seg->use_rela_p && (value < 0 || value > 255))
24430 as_bad_where (fixP->fx_file, fixP->fx_line,
24431 _("the offset 0x%08lX is not representable"),
24432 (unsigned long) encoded_addend);
24433
24434 /* Extract the instruction. */
24435 insn = md_chars_to_number (buf, THUMB_SIZE);
24436 is_mov = (insn & 0xf800) == 0x2000;
24437
24438 /* Encode insn. */
24439 if (is_mov)
24440 {
24441 if (!seg->use_rela_p)
24442 insn |= encoded_addend;
24443 }
24444 else
24445 {
24446 int rd, rs;
24447
24448 /* Extract the instruction. */
24449 /* Encoding is the following
24450 0x8000 SUB
24451 0x00F0 Rd
24452 0x000F Rs
24453 */
24454 /* The following conditions must be true :
24455 - ADD
24456 - Rd == Rs
24457 - Rd <= 7
24458 */
24459 rd = (insn >> 4) & 0xf;
24460 rs = insn & 0xf;
24461 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24462 as_bad_where (fixP->fx_file, fixP->fx_line,
24463 _("Unable to process relocation for thumb opcode: %lx"),
24464 (unsigned long) insn);
24465
24466 /* Encode as ADD immediate8 thumb 1 code. */
24467 insn = 0x3000 | (rd << 8);
24468
24469 /* Place the encoded addend into the first 8 bits of the
24470 instruction. */
24471 if (!seg->use_rela_p)
24472 insn |= encoded_addend;
24473 }
24474
24475 /* Update the instruction. */
24476 md_number_to_chars (buf, insn, THUMB_SIZE);
24477 }
24478 break;
24479
24480 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24481 case BFD_RELOC_ARM_ALU_PC_G0:
24482 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24483 case BFD_RELOC_ARM_ALU_PC_G1:
24484 case BFD_RELOC_ARM_ALU_PC_G2:
24485 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24486 case BFD_RELOC_ARM_ALU_SB_G0:
24487 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24488 case BFD_RELOC_ARM_ALU_SB_G1:
24489 case BFD_RELOC_ARM_ALU_SB_G2:
24490 gas_assert (!fixP->fx_done);
24491 if (!seg->use_rela_p)
24492 {
24493 bfd_vma insn;
24494 bfd_vma encoded_addend;
24495 bfd_vma addend_abs = abs (value);
24496
24497 /* Check that the absolute value of the addend can be
24498 expressed as an 8-bit constant plus a rotation. */
24499 encoded_addend = encode_arm_immediate (addend_abs);
24500 if (encoded_addend == (unsigned int) FAIL)
24501 as_bad_where (fixP->fx_file, fixP->fx_line,
24502 _("the offset 0x%08lX is not representable"),
24503 (unsigned long) addend_abs);
24504
24505 /* Extract the instruction. */
24506 insn = md_chars_to_number (buf, INSN_SIZE);
24507
24508 /* If the addend is positive, use an ADD instruction.
24509 Otherwise use a SUB. Take care not to destroy the S bit. */
24510 insn &= 0xff1fffff;
24511 if (value < 0)
24512 insn |= 1 << 22;
24513 else
24514 insn |= 1 << 23;
24515
24516 /* Place the encoded addend into the first 12 bits of the
24517 instruction. */
24518 insn &= 0xfffff000;
24519 insn |= encoded_addend;
24520
24521 /* Update the instruction. */
24522 md_number_to_chars (buf, insn, INSN_SIZE);
24523 }
24524 break;
24525
24526 case BFD_RELOC_ARM_LDR_PC_G0:
24527 case BFD_RELOC_ARM_LDR_PC_G1:
24528 case BFD_RELOC_ARM_LDR_PC_G2:
24529 case BFD_RELOC_ARM_LDR_SB_G0:
24530 case BFD_RELOC_ARM_LDR_SB_G1:
24531 case BFD_RELOC_ARM_LDR_SB_G2:
24532 gas_assert (!fixP->fx_done);
24533 if (!seg->use_rela_p)
24534 {
24535 bfd_vma insn;
24536 bfd_vma addend_abs = abs (value);
24537
24538 /* Check that the absolute value of the addend can be
24539 encoded in 12 bits. */
24540 if (addend_abs >= 0x1000)
24541 as_bad_where (fixP->fx_file, fixP->fx_line,
24542 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24543 (unsigned long) addend_abs);
24544
24545 /* Extract the instruction. */
24546 insn = md_chars_to_number (buf, INSN_SIZE);
24547
24548 /* If the addend is negative, clear bit 23 of the instruction.
24549 Otherwise set it. */
24550 if (value < 0)
24551 insn &= ~(1 << 23);
24552 else
24553 insn |= 1 << 23;
24554
24555 /* Place the absolute value of the addend into the first 12 bits
24556 of the instruction. */
24557 insn &= 0xfffff000;
24558 insn |= addend_abs;
24559
24560 /* Update the instruction. */
24561 md_number_to_chars (buf, insn, INSN_SIZE);
24562 }
24563 break;
24564
24565 case BFD_RELOC_ARM_LDRS_PC_G0:
24566 case BFD_RELOC_ARM_LDRS_PC_G1:
24567 case BFD_RELOC_ARM_LDRS_PC_G2:
24568 case BFD_RELOC_ARM_LDRS_SB_G0:
24569 case BFD_RELOC_ARM_LDRS_SB_G1:
24570 case BFD_RELOC_ARM_LDRS_SB_G2:
24571 gas_assert (!fixP->fx_done);
24572 if (!seg->use_rela_p)
24573 {
24574 bfd_vma insn;
24575 bfd_vma addend_abs = abs (value);
24576
24577 /* Check that the absolute value of the addend can be
24578 encoded in 8 bits. */
24579 if (addend_abs >= 0x100)
24580 as_bad_where (fixP->fx_file, fixP->fx_line,
24581 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24582 (unsigned long) addend_abs);
24583
24584 /* Extract the instruction. */
24585 insn = md_chars_to_number (buf, INSN_SIZE);
24586
24587 /* If the addend is negative, clear bit 23 of the instruction.
24588 Otherwise set it. */
24589 if (value < 0)
24590 insn &= ~(1 << 23);
24591 else
24592 insn |= 1 << 23;
24593
24594 /* Place the first four bits of the absolute value of the addend
24595 into the first 4 bits of the instruction, and the remaining
24596 four into bits 8 .. 11. */
24597 insn &= 0xfffff0f0;
24598 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24599
24600 /* Update the instruction. */
24601 md_number_to_chars (buf, insn, INSN_SIZE);
24602 }
24603 break;
24604
24605 case BFD_RELOC_ARM_LDC_PC_G0:
24606 case BFD_RELOC_ARM_LDC_PC_G1:
24607 case BFD_RELOC_ARM_LDC_PC_G2:
24608 case BFD_RELOC_ARM_LDC_SB_G0:
24609 case BFD_RELOC_ARM_LDC_SB_G1:
24610 case BFD_RELOC_ARM_LDC_SB_G2:
24611 gas_assert (!fixP->fx_done);
24612 if (!seg->use_rela_p)
24613 {
24614 bfd_vma insn;
24615 bfd_vma addend_abs = abs (value);
24616
24617 /* Check that the absolute value of the addend is a multiple of
24618 four and, when divided by four, fits in 8 bits. */
24619 if (addend_abs & 0x3)
24620 as_bad_where (fixP->fx_file, fixP->fx_line,
24621 _("bad offset 0x%08lX (must be word-aligned)"),
24622 (unsigned long) addend_abs);
24623
24624 if ((addend_abs >> 2) > 0xff)
24625 as_bad_where (fixP->fx_file, fixP->fx_line,
24626 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24627 (unsigned long) addend_abs);
24628
24629 /* Extract the instruction. */
24630 insn = md_chars_to_number (buf, INSN_SIZE);
24631
24632 /* If the addend is negative, clear bit 23 of the instruction.
24633 Otherwise set it. */
24634 if (value < 0)
24635 insn &= ~(1 << 23);
24636 else
24637 insn |= 1 << 23;
24638
24639 /* Place the addend (divided by four) into the first eight
24640 bits of the instruction. */
24641 insn &= 0xfffffff0;
24642 insn |= addend_abs >> 2;
24643
24644 /* Update the instruction. */
24645 md_number_to_chars (buf, insn, INSN_SIZE);
24646 }
24647 break;
24648
24649 case BFD_RELOC_ARM_V4BX:
24650 /* This will need to go in the object file. */
24651 fixP->fx_done = 0;
24652 break;
24653
24654 case BFD_RELOC_UNUSED:
24655 default:
24656 as_bad_where (fixP->fx_file, fixP->fx_line,
24657 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24658 }
24659 }
24660
24661 /* Translate internal representation of relocation info to BFD target
24662 format. */
24663
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* Allocate the BFD relocation and fill in the fields that do not
     depend on the relocation type: symbol, section offset, addend.  */
  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups, RELA targets carry the adjusted addend in
     the relocation itself, whereas REL targets record the place of the
     fixup as the offset.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map GAS's internal fixup type onto the BFD reloc code to emit.
     Data relocs that turned out to be PC-relative select their _PCREL
     variant here; purely internal fixups that should have been resolved
     by md_apply_fix are diagnosed and rejected.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These pass through to BFD unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI v4 onwards, BLX relocs are expressed as BRANCH23 and
	 the linker decides whether a mode change is needed.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Produce a readable name for the unrepresentable reloc in the
	   diagnostic below.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* 32-bit (PC-relative or not) references to _GLOBAL_OFFSET_TABLE_
     become GOTPC relocations.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24936
24937 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24938
24939 void
24940 cons_fix_new_arm (fragS * frag,
24941 int where,
24942 int size,
24943 expressionS * exp,
24944 bfd_reloc_code_real_type reloc)
24945 {
24946 int pcrel = 0;
24947
24948 /* Pick a reloc.
24949 FIXME: @@ Should look at CPU word size. */
24950 switch (size)
24951 {
24952 case 1:
24953 reloc = BFD_RELOC_8;
24954 break;
24955 case 2:
24956 reloc = BFD_RELOC_16;
24957 break;
24958 case 4:
24959 default:
24960 reloc = BFD_RELOC_32;
24961 break;
24962 case 8:
24963 reloc = BFD_RELOC_64;
24964 break;
24965 }
24966
24967 #ifdef TE_PE
24968 if (exp->X_op == O_secrel)
24969 {
24970 exp->X_op = O_symbol;
24971 reloc = BFD_RELOC_32_SECREL;
24972 }
24973 #endif
24974
24975 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24976 }
24977
24978 #if defined (OBJ_COFF)
24979 void
24980 arm_validate_fix (fixS * fixP)
24981 {
24982 /* If the destination of the branch is a defined symbol which does not have
24983 the THUMB_FUNC attribute, then we must be calling a function which has
24984 the (interfacearm) attribute. We look for the Thumb entry point to that
24985 function and change the branch to refer to that function instead. */
24986 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24987 && fixP->fx_addsy != NULL
24988 && S_IS_DEFINED (fixP->fx_addsy)
24989 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24990 {
24991 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24992 }
24993 }
24994 #endif
24995
24996
24997 int
24998 arm_force_relocation (struct fix * fixp)
24999 {
25000 #if defined (OBJ_COFF) && defined (TE_PE)
25001 if (fixp->fx_r_type == BFD_RELOC_RVA)
25002 return 1;
25003 #endif
25004
25005 /* In case we have a call or a branch to a function in ARM ISA mode from
25006 a thumb function or vice-versa force the relocation. These relocations
25007 are cleared off for some cores that might have blx and simple transformations
25008 are possible. */
25009
25010 #ifdef OBJ_ELF
25011 switch (fixp->fx_r_type)
25012 {
25013 case BFD_RELOC_ARM_PCREL_JUMP:
25014 case BFD_RELOC_ARM_PCREL_CALL:
25015 case BFD_RELOC_THUMB_PCREL_BLX:
25016 if (THUMB_IS_FUNC (fixp->fx_addsy))
25017 return 1;
25018 break;
25019
25020 case BFD_RELOC_ARM_PCREL_BLX:
25021 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25022 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25023 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25024 if (ARM_IS_FUNC (fixp->fx_addsy))
25025 return 1;
25026 break;
25027
25028 default:
25029 break;
25030 }
25031 #endif
25032
25033 /* Resolve these relocations even if the symbol is extern or weak.
25034 Technically this is probably wrong due to symbol preemption.
25035 In practice these relocations do not have enough range to be useful
25036 at dynamic link time, and some code (e.g. in the Linux kernel)
25037 expects these references to be resolved. */
25038 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25039 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25040 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25041 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25042 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25043 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25044 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25045 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25046 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25047 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25048 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25049 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25050 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25051 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25052 return 0;
25053
25054 /* Always leave these relocations for the linker. */
25055 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25056 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25057 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25058 return 1;
25059
25060 /* Always generate relocations against function symbols. */
25061 if (fixp->fx_r_type == BFD_RELOC_32
25062 && fixp->fx_addsy
25063 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25064 return 1;
25065
25066 return generic_force_reloc (fixp);
25067 }
25068
25069 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25070 /* Relocations against function names must be left unadjusted,
25071 so that the linker can use this information to generate interworking
25072 stubs. The MIPS version of this function
25073 also prevents relocations that are mips-16 specific, but I do not
25074 know why it does this.
25075
25076 FIXME:
25077 There is one other problem that ought to be addressed here, but
25078 which currently is not: Taking the address of a label (rather
25079 than a function) and then later jumping to that address. Such
25080 addresses also ought to have their bottom bit set (assuming that
25081 they reside in Thumb code), but at the moment they will not. */
25082
25083 bfd_boolean
25084 arm_fix_adjustable (fixS * fixP)
25085 {
25086 if (fixP->fx_addsy == NULL)
25087 return 1;
25088
25089 /* Preserve relocations against symbols with function type. */
25090 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25091 return FALSE;
25092
25093 if (THUMB_IS_FUNC (fixP->fx_addsy)
25094 && fixP->fx_subsy == NULL)
25095 return FALSE;
25096
25097 /* We need the symbol name for the VTABLE entries. */
25098 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25099 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25100 return FALSE;
25101
25102 /* Don't allow symbols to be discarded on GOT related relocs. */
25103 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25104 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25105 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25106 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25107 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
25108 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25109 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25110 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
25111 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25112 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
25113 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25114 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25115 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25116 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25117 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25118 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25119 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25120 return FALSE;
25121
25122 /* Similarly for group relocations. */
25123 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25124 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25125 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25126 return FALSE;
25127
25128 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25129 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25130 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25131 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25132 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25133 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25134 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25135 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25136 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25137 return FALSE;
25138
25139 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25140 offsets, so keep these symbols. */
25141 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25142 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25143 return FALSE;
25144
25145 return TRUE;
25146 }
25147 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25148
25149 #ifdef OBJ_ELF
25150 const char *
25151 elf32_arm_target_format (void)
25152 {
25153 #ifdef TE_SYMBIAN
25154 return (target_big_endian
25155 ? "elf32-bigarm-symbian"
25156 : "elf32-littlearm-symbian");
25157 #elif defined (TE_VXWORKS)
25158 return (target_big_endian
25159 ? "elf32-bigarm-vxworks"
25160 : "elf32-littlearm-vxworks");
25161 #elif defined (TE_NACL)
25162 return (target_big_endian
25163 ? "elf32-bigarm-nacl"
25164 : "elf32-littlearm-nacl");
25165 #else
25166 if (arm_fdpic)
25167 {
25168 if (target_big_endian)
25169 return "elf32-bigarm-fdpic";
25170 else
25171 return "elf32-littlearm-fdpic";
25172 }
25173 else
25174 {
25175 if (target_big_endian)
25176 return "elf32-bigarm";
25177 else
25178 return "elf32-littlearm";
25179 }
25180 #endif
25181 }
25182
/* Per-symbol hook run while writing out the symbol table: simply defer
   to the generic ELF symbol frobber.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
25189 #endif
25190
25191 /* MD interface: Finalization. */
25192
25193 void
25194 arm_cleanup (void)
25195 {
25196 literal_pool * pool;
25197
25198 /* Ensure that all the IT blocks are properly closed. */
25199 check_it_blocks_finished ();
25200
25201 for (pool = list_of_pools; pool; pool = pool->next)
25202 {
25203 /* Put it at the end of the relevant section. */
25204 subseg_set (pool->section, pool->sub_section);
25205 #ifdef OBJ_ELF
25206 arm_elf_change_section ();
25207 #endif
25208 s_ltorg (0);
25209 }
25210 }
25211
25212 #ifdef OBJ_ELF
25213 /* Remove any excess mapping symbols generated for alignment frags in
25214 SEC. We may have created a mapping symbol before a zero byte
25215 alignment; remove it if there's a mapping symbol after the
25216 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan if the section has no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section; for each, consider the last
     mapping symbol recorded in that frag and decide whether it is
     redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 scan forward across empty frags to see whether a later mapping
	 symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
25277 #endif
25278
25279 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25280 ARM ones. */
25281
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: encode Thumb-ness in the symbol's storage class, and flag
     interworking symbols via n_flags.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: record Thumb-ness in the symbol's branch-type / st_info,
     skipping the $a/$t/$d mapping symbols themselves.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25360
25361 /* MD interface: Initialization. */
25362
25363 static void
25364 set_constant_flonums (void)
25365 {
25366 int i;
25367
25368 for (i = 0; i < NUM_FLOAT_VALS; i++)
25369 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25370 abort ();
25371 }
25372
25373 /* Auto-select Thumb mode if it's the only available instruction set for the
25374 given architecture. */
25375
25376 static void
25377 autoselect_thumb_from_cpu_variant (void)
25378 {
25379 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
25380 opcode_select (16);
25381 }
25382
/* MD interface hook: called once at assembler start-up, after the
   command line has been parsed.  Builds the opcode/operand hash
   tables, reconciles the legacy and new-style CPU/FPU options into
   cpu_variant, selects the initial instruction set (ARM vs Thumb),
   records the object-file private flags, and tells BFD which machine
   variant is being assembled for.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate the hash tables used during instruction parsing.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each hash table from its static descriptor table; the
     hash values are pointers into those tables.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  /* Resolve the FPU feature set, using the same legacy-vs-new
     precedence scheme as for the CPU above.  */
  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU selected: fall back on the environment default, or
     FPA when no CPU was named either.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instruction have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  /* Start with no architecture features recorded as used.  */
  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Ordered from most specific
     (coprocessor extensions) down to the oldest base architectures.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25619
25620 /* Command line processing. */
25621
25622 /* md_parse_option
25623 Invocation line includes a switch not recognized by the base assembler.
25624 See if it's a processor-specific option.
25625
25626 This routine is somewhat complicated by the need for backwards
25627 compatibility (since older releases of gcc can't be changed).
25628 The new options try to make the interface as compatible as
25629 possible with GCC.
25630
25631 New options (supported) are:
25632
25633 -mcpu=<cpu name> Assemble for selected processor
25634 -march=<architecture name> Assemble for selected architecture
25635 -mfpu=<fpu architecture> Assemble for selected FPU.
25636 -EB/-mbig-endian Big-endian
25637 -EL/-mlittle-endian Little-endian
25638 -k Generate PIC code
25639 -mthumb Start in Thumb mode
25640 -mthumb-interwork Code supports ARM/Thumb interworking
25641
25642 -m[no-]warn-deprecated Warn about deprecated features
25643 -m[no-]warn-syms Warn when symbols match instructions
25644
25645 For now we will also provide support for:
25646
25647 -mapcs-32 32-bit Program counter
25648 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
25650 -mapcs-reentrant Reentrant code
25651 -matpcs
   (at some point these will probably be replaced with -mapcs=<list of options>
25653 and -matpcs=<list of options>)
25654
   The remaining options are only supported for backwards compatibility.
25656 Cpu variants, the arm part is optional:
25657 -m[arm]1 Currently not supported.
25658 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25659 -m[arm]3 Arm 3 processor
25660 -m[arm]6[xx], Arm 6 processors
25661 -m[arm]7[xx][t][[d]m] Arm 7 processors
25662 -m[arm]8[10] Arm 8 processors
25663 -m[arm]9[20][tdmi] Arm 9 processors
25664 -mstrongarm[110[0]] StrongARM processors
25665 -mxscale XScale processors
25666 -m[arm]v[2345[t[e]]] Arm architectures
25667 -mall All (except the ARM1)
25668 FP variants:
25669 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25670 -mfpe-old (No float load/store multiples)
25671 -mvfpxd VFP Single precision
25672 -mvfp All VFP
25673 -mno-fpu Disable all floating point instructions
25674
25675 The following CPU names are recognized:
25676 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25677 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25678 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25679 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25680 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25681 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25682 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25683
25684 */
25685
/* Single-character options for md_parse_option: -m<arg> (CPU/feature
   selection, takes an argument) and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
25687
25688 #ifdef ARM_BI_ENDIAN
25689 #define OPTION_EB (OPTION_MD_BASE + 0)
25690 #define OPTION_EL (OPTION_MD_BASE + 1)
25691 #else
25692 #if TARGET_BYTES_BIG_ENDIAN
25693 #define OPTION_EB (OPTION_MD_BASE + 0)
25694 #else
25695 #define OPTION_EL (OPTION_MD_BASE + 1)
25696 #endif
25697 #endif
25698 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25699 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
25700
/* Long options handled by md_parse_option.  The -EB/-EL entries are
   only present when the target supports the corresponding byte order
   (see the OPTION_EB/OPTION_EL definitions above).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
25717
/* Table entry describing a simple command-line option that just sets
   an integer variable to a fixed value (see arm_opts below).  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.  */
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25726
25727 struct arm_option_table arm_opts[] =
25728 {
25729 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25730 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25731 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25732 &support_interwork, 1, NULL},
25733 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25734 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25735 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25736 1, NULL},
25737 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25738 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25739 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25740 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25741 NULL},
25742
25743 /* These are recognized by the assembler, but have no affect on code. */
25744 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25745 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25746
25747 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25748 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25749 &warn_on_deprecated, 0, NULL},
25750 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25751 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25752 {NULL, NULL, NULL, 0, NULL}
25753 };
25754
/* Table entry describing a deprecated (legacy) command-line option
   that maps directly onto a feature set (see arm_legacy_opts below).  */
struct arm_legacy_option_table
{
  const char *		  option;	/* Option name to match.  */
  const arm_feature_set	** var;		/* Variable to change.  */
  const arm_feature_set	  value;	/* What to change it to.  */
  const char *		  deprecated;	/* If non-null, print this message.  */
};
25762
/* Legacy -m<cpu>/-m<arch>/-m<fpu> spellings.  Each entry sets
   legacy_cpu or legacy_fpu to the corresponding feature set and
   carries the modern -mcpu=/-march=/-mfpu= replacement that is
   printed as a deprecation hint.  Terminated by a NULL entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25875
/* Description of one CPU accepted by -mcpu= (see the arm_cpus table
   below).  */
struct arm_cpu_option_table
{
  const char * name;		/* CPU name as given on the command line.  */
  size_t name_len;		/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Architectural features of the CPU.  */
  const arm_feature_set	ext;	/* Extension features of the CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
25889
25890 /* This list should, at a minimum, contain all the cpu names
25891 recognized by GCC. */
25892 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25893
25894 static const struct arm_cpu_option_table arm_cpus[] =
25895 {
25896 ARM_CPU_OPT ("all", NULL, ARM_ANY,
25897 ARM_ARCH_NONE,
25898 FPU_ARCH_FPA),
25899 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
25900 ARM_ARCH_NONE,
25901 FPU_ARCH_FPA),
25902 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
25903 ARM_ARCH_NONE,
25904 FPU_ARCH_FPA),
25905 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
25906 ARM_ARCH_NONE,
25907 FPU_ARCH_FPA),
25908 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
25909 ARM_ARCH_NONE,
25910 FPU_ARCH_FPA),
25911 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
25912 ARM_ARCH_NONE,
25913 FPU_ARCH_FPA),
25914 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
25915 ARM_ARCH_NONE,
25916 FPU_ARCH_FPA),
25917 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
25918 ARM_ARCH_NONE,
25919 FPU_ARCH_FPA),
25920 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
25921 ARM_ARCH_NONE,
25922 FPU_ARCH_FPA),
25923 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
25924 ARM_ARCH_NONE,
25925 FPU_ARCH_FPA),
25926 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
25927 ARM_ARCH_NONE,
25928 FPU_ARCH_FPA),
25929 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
25930 ARM_ARCH_NONE,
25931 FPU_ARCH_FPA),
25932 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
25933 ARM_ARCH_NONE,
25934 FPU_ARCH_FPA),
25935 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
25936 ARM_ARCH_NONE,
25937 FPU_ARCH_FPA),
25938 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
25939 ARM_ARCH_NONE,
25940 FPU_ARCH_FPA),
25941 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
25942 ARM_ARCH_NONE,
25943 FPU_ARCH_FPA),
25944 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
25945 ARM_ARCH_NONE,
25946 FPU_ARCH_FPA),
25947 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
25948 ARM_ARCH_NONE,
25949 FPU_ARCH_FPA),
25950 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
25951 ARM_ARCH_NONE,
25952 FPU_ARCH_FPA),
25953 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
25954 ARM_ARCH_NONE,
25955 FPU_ARCH_FPA),
25956 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
25957 ARM_ARCH_NONE,
25958 FPU_ARCH_FPA),
25959 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
25960 ARM_ARCH_NONE,
25961 FPU_ARCH_FPA),
25962 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
25963 ARM_ARCH_NONE,
25964 FPU_ARCH_FPA),
25965 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
25966 ARM_ARCH_NONE,
25967 FPU_ARCH_FPA),
25968 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
25969 ARM_ARCH_NONE,
25970 FPU_ARCH_FPA),
25971 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
25972 ARM_ARCH_NONE,
25973 FPU_ARCH_FPA),
25974 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
25975 ARM_ARCH_NONE,
25976 FPU_ARCH_FPA),
25977 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
25978 ARM_ARCH_NONE,
25979 FPU_ARCH_FPA),
25980 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
25981 ARM_ARCH_NONE,
25982 FPU_ARCH_FPA),
25983 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
25984 ARM_ARCH_NONE,
25985 FPU_ARCH_FPA),
25986 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
25987 ARM_ARCH_NONE,
25988 FPU_ARCH_FPA),
25989 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
25990 ARM_ARCH_NONE,
25991 FPU_ARCH_FPA),
25992 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
25993 ARM_ARCH_NONE,
25994 FPU_ARCH_FPA),
25995 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
25996 ARM_ARCH_NONE,
25997 FPU_ARCH_FPA),
25998 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
25999 ARM_ARCH_NONE,
26000 FPU_ARCH_FPA),
26001 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
26002 ARM_ARCH_NONE,
26003 FPU_ARCH_FPA),
26004 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
26005 ARM_ARCH_NONE,
26006 FPU_ARCH_FPA),
26007 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
26008 ARM_ARCH_NONE,
26009 FPU_ARCH_FPA),
26010 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
26011 ARM_ARCH_NONE,
26012 FPU_ARCH_FPA),
26013 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
26014 ARM_ARCH_NONE,
26015 FPU_ARCH_FPA),
26016 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
26017 ARM_ARCH_NONE,
26018 FPU_ARCH_FPA),
26019 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
26020 ARM_ARCH_NONE,
26021 FPU_ARCH_FPA),
26022 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
26023 ARM_ARCH_NONE,
26024 FPU_ARCH_FPA),
26025 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
26026 ARM_ARCH_NONE,
26027 FPU_ARCH_FPA),
26028 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
26029 ARM_ARCH_NONE,
26030 FPU_ARCH_FPA),
26031 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
26032 ARM_ARCH_NONE,
26033 FPU_ARCH_FPA),
26034
26035 /* For V5 or later processors we default to using VFP; but the user
26036 should really set the FPU type explicitly. */
26037 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
26038 ARM_ARCH_NONE,
26039 FPU_ARCH_VFP_V2),
26040 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
26041 ARM_ARCH_NONE,
26042 FPU_ARCH_VFP_V2),
26043 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26044 ARM_ARCH_NONE,
26045 FPU_ARCH_VFP_V2),
26046 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26047 ARM_ARCH_NONE,
26048 FPU_ARCH_VFP_V2),
26049 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
26050 ARM_ARCH_NONE,
26051 FPU_ARCH_VFP_V2),
26052 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
26053 ARM_ARCH_NONE,
26054 FPU_ARCH_VFP_V2),
26055 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
26056 ARM_ARCH_NONE,
26057 FPU_ARCH_VFP_V2),
26058 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
26059 ARM_ARCH_NONE,
26060 FPU_ARCH_VFP_V2),
26061 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
26062 ARM_ARCH_NONE,
26063 FPU_ARCH_VFP_V2),
26064 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
26065 ARM_ARCH_NONE,
26066 FPU_ARCH_VFP_V2),
26067 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
26068 ARM_ARCH_NONE,
26069 FPU_ARCH_VFP_V2),
26070 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
26071 ARM_ARCH_NONE,
26072 FPU_ARCH_VFP_V2),
26073 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
26074 ARM_ARCH_NONE,
26075 FPU_ARCH_VFP_V1),
26076 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
26077 ARM_ARCH_NONE,
26078 FPU_ARCH_VFP_V1),
26079 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
26080 ARM_ARCH_NONE,
26081 FPU_ARCH_VFP_V2),
26082 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
26083 ARM_ARCH_NONE,
26084 FPU_ARCH_VFP_V2),
26085 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
26086 ARM_ARCH_NONE,
26087 FPU_ARCH_VFP_V1),
26088 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
26089 ARM_ARCH_NONE,
26090 FPU_ARCH_VFP_V2),
26091 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
26092 ARM_ARCH_NONE,
26093 FPU_ARCH_VFP_V2),
26094 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
26095 ARM_ARCH_NONE,
26096 FPU_ARCH_VFP_V2),
26097 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
26098 ARM_ARCH_NONE,
26099 FPU_ARCH_VFP_V2),
26100 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
26101 ARM_ARCH_NONE,
26102 FPU_ARCH_VFP_V2),
26103 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
26104 ARM_ARCH_NONE,
26105 FPU_ARCH_VFP_V2),
26106 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
26107 ARM_ARCH_NONE,
26108 FPU_ARCH_VFP_V2),
26109 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
26110 ARM_ARCH_NONE,
26111 FPU_ARCH_VFP_V2),
26112 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
26113 ARM_ARCH_NONE,
26114 FPU_ARCH_VFP_V2),
26115 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
26116 ARM_ARCH_NONE,
26117 FPU_NONE),
26118 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
26119 ARM_ARCH_NONE,
26120 FPU_NONE),
26121 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
26122 ARM_ARCH_NONE,
26123 FPU_ARCH_VFP_V2),
26124 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
26125 ARM_ARCH_NONE,
26126 FPU_ARCH_VFP_V2),
26127 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
26128 ARM_ARCH_NONE,
26129 FPU_ARCH_VFP_V2),
26130 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
26131 ARM_ARCH_NONE,
26132 FPU_NONE),
26133 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
26134 ARM_ARCH_NONE,
26135 FPU_NONE),
26136 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
26137 ARM_ARCH_NONE,
26138 FPU_ARCH_VFP_V2),
26139 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
26140 ARM_ARCH_NONE,
26141 FPU_NONE),
26142 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
26143 ARM_ARCH_NONE,
26144 FPU_ARCH_VFP_V2),
26145 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
26146 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26147 FPU_NONE),
26148 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
26149 ARM_ARCH_NONE,
26150 FPU_ARCH_NEON_VFP_V4),
26151 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
26152 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26153 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26154 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
26155 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26156 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26157 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
26158 ARM_ARCH_NONE,
26159 FPU_ARCH_NEON_VFP_V4),
26160 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
26161 ARM_ARCH_NONE,
26162 FPU_ARCH_NEON_VFP_V4),
26163 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
26164 ARM_ARCH_NONE,
26165 FPU_ARCH_NEON_VFP_V4),
26166 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
26167 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26168 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26169 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
26170 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26171 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26172 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
26173 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26174 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26175 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
26176 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26177 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26178 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
26179 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26180 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26181 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
26182 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26183 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26184 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
26185 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26186 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26187 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
26188 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26189 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26190 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
26191 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26192 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26193 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
26194 ARM_ARCH_NONE,
26195 FPU_NONE),
26196 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
26197 ARM_ARCH_NONE,
26198 FPU_ARCH_VFP_V3D16),
26199 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
26200 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26201 FPU_NONE),
26202 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
26203 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26204 FPU_ARCH_VFP_V3D16),
26205 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
26206 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26207 FPU_ARCH_VFP_V3D16),
26208 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
26209 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26210 FPU_ARCH_NEON_VFP_ARMV8),
26211 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
26212 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26213 FPU_NONE),
26214 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
26215 ARM_ARCH_NONE,
26216 FPU_NONE),
26217 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
26218 ARM_ARCH_NONE,
26219 FPU_NONE),
26220 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
26221 ARM_ARCH_NONE,
26222 FPU_NONE),
26223 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
26224 ARM_ARCH_NONE,
26225 FPU_NONE),
26226 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
26227 ARM_ARCH_NONE,
26228 FPU_NONE),
26229 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
26230 ARM_ARCH_NONE,
26231 FPU_NONE),
26232 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
26233 ARM_ARCH_NONE,
26234 FPU_NONE),
26235 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
26236 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26237 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26238
26239 /* ??? XSCALE is really an architecture. */
26240 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
26241 ARM_ARCH_NONE,
26242 FPU_ARCH_VFP_V2),
26243
26244 /* ??? iwmmxt is not a processor. */
26245 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
26246 ARM_ARCH_NONE,
26247 FPU_ARCH_VFP_V2),
26248 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
26249 ARM_ARCH_NONE,
26250 FPU_ARCH_VFP_V2),
26251 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
26252 ARM_ARCH_NONE,
26253 FPU_ARCH_VFP_V2),
26254
26255 /* Maverick. */
26256 ARM_CPU_OPT ("ep9312", "ARM920T",
26257 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
26258 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
26259
26260 /* Marvell processors. */
26261 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
26262 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26263 FPU_ARCH_VFP_V3D16),
26264 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
26265 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26266 FPU_ARCH_NEON_VFP_V4),
26267
26268 /* APM X-Gene family. */
26269 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
26270 ARM_ARCH_NONE,
26271 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26272 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
26273 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26274 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26275
26276 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
26277 };
26278 #undef ARM_CPU_OPT
26279
/* An entry in the -march= option table: maps an architecture name onto
   the feature set it provides and the FPU assumed when none is given.  */
struct arm_arch_option_table
{
  const char * name;			/* Architecture name, e.g. "armv8-a".  */
  size_t name_len;			/* Length of NAME, precomputed for matching.  */
  const arm_feature_set value;		/* Features the architecture provides.  */
  const arm_feature_set default_fpu;	/* Default FPU if -mfpu= is not given.  */
};
26287
26288 /* This list should, at a minimum, contain all the architecture names
26289 recognized by GCC. */
/* NAME_LEN is computed at compile time from the literal name.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Terminating entry.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
26354
26355 /* ISA extensions in the co-processor and main instruction set space. */
26356
/* An entry describing one architectural extension (+<name> suffix of
   -mcpu=/-march=): the features it adds, the features +no<name> removes,
   and the architectures it may legally be applied to.  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, e.g. "crc".  */
  size_t name_len;			/* Length of NAME, precomputed for matching.  */
  const arm_feature_set merge_value;	/* Features added by +<name>.  */
  const arm_feature_set clear_value;	/* Features removed by +no<name>.  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
26368
26369 /* The following table must be in alphabetical order with a NULL last entry. */
26370
/* ARM_EXT_OPT is for extensions with a single allowed architecture;
   ARM_EXT_OPT2 allows two (e.g. both the A and R profiles).  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						 | ARM_EXT2_FP16_FML),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_FP16_FML),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
	       ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
	       ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
	       ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  /* Terminating entry.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
26449
26450 /* ISA floating-point and Advanced SIMD extensions. */
/* ISA floating-point and Advanced SIMD extensions.  Maps an -mfpu= name
   onto the FPU feature set it enables.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name accepted by -mfpu=.  */
  const arm_feature_set value;	/* Features the FPU provides.  */
};
26456
26457 /* This list should, at a minimum, contain all the fpu names
26458 recognized by GCC. */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  /* CPU names accepted here as FPU selectors for legacy reasons.  */
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Terminating entry.  */
  {NULL,		ARM_ARCH_NONE}
};
26507
/* Generic name/value pair used for simple enumerated options
   (float ABI, EABI version).  */
struct arm_option_value_table
{
  const char *name;	/* Accepted option value string.  */
  long value;		/* Corresponding internal setting.  */
};
26513
/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}	/* Terminating entry.  */
};
26521
26522 #ifdef OBJ_ELF
26523 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Values accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}	/* Terminating entry.  */
};
26531 #endif
26532
/* A multi-character command-line option (e.g. -mcpu=<name>) together with
   the function that parses its sub-option string.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26540
/* Parse STR, a '+'-separated list of architectural extensions (e.g.
   "+fp16+nocrypto") to apply on top of the architecture/CPU feature set
   *OPT_SET, merging added features into and clearing removed features from
   *EXT_SET.  Returns TRUE on success; on failure emits a diagnostic via
   as_bad and returns FALSE.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension in the list must start with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name only.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  /* A "no" prefix switches to removal mode; restart the table scan
	     since removals form their own alphabetical sequence.  */
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      /* Empty name, e.g. "++" or a trailing "+no".  */
      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  OPT is
	 carried over from the previous iteration, which is what enforces
	 alphabetical ordering of the extensions on the command line.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
26672
26673 static bfd_boolean
26674 arm_parse_cpu (const char *str)
26675 {
26676 const struct arm_cpu_option_table *opt;
26677 const char *ext = strchr (str, '+');
26678 size_t len;
26679
26680 if (ext != NULL)
26681 len = ext - str;
26682 else
26683 len = strlen (str);
26684
26685 if (len == 0)
26686 {
26687 as_bad (_("missing cpu name `%s'"), str);
26688 return FALSE;
26689 }
26690
26691 for (opt = arm_cpus; opt->name != NULL; opt++)
26692 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26693 {
26694 mcpu_cpu_opt = &opt->value;
26695 if (mcpu_ext_opt == NULL)
26696 mcpu_ext_opt = XNEW (arm_feature_set);
26697 *mcpu_ext_opt = opt->ext;
26698 mcpu_fpu_opt = &opt->default_fpu;
26699 if (opt->canonical_name)
26700 {
26701 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
26702 strcpy (selected_cpu_name, opt->canonical_name);
26703 }
26704 else
26705 {
26706 size_t i;
26707
26708 if (len >= sizeof selected_cpu_name)
26709 len = (sizeof selected_cpu_name) - 1;
26710
26711 for (i = 0; i < len; i++)
26712 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26713 selected_cpu_name[i] = 0;
26714 }
26715
26716 if (ext != NULL)
26717 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);
26718
26719 return TRUE;
26720 }
26721
26722 as_bad (_("unknown cpu `%s'"), str);
26723 return FALSE;
26724 }
26725
26726 static bfd_boolean
26727 arm_parse_arch (const char *str)
26728 {
26729 const struct arm_arch_option_table *opt;
26730 const char *ext = strchr (str, '+');
26731 size_t len;
26732
26733 if (ext != NULL)
26734 len = ext - str;
26735 else
26736 len = strlen (str);
26737
26738 if (len == 0)
26739 {
26740 as_bad (_("missing architecture name `%s'"), str);
26741 return FALSE;
26742 }
26743
26744 for (opt = arm_archs; opt->name != NULL; opt++)
26745 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26746 {
26747 march_cpu_opt = &opt->value;
26748 if (march_ext_opt == NULL)
26749 march_ext_opt = XNEW (arm_feature_set);
26750 *march_ext_opt = arm_arch_none;
26751 march_fpu_opt = &opt->default_fpu;
26752 strcpy (selected_cpu_name, opt->name);
26753
26754 if (ext != NULL)
26755 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);
26756
26757 return TRUE;
26758 }
26759
26760 as_bad (_("unknown architecture `%s'\n"), str);
26761 return FALSE;
26762 }
26763
26764 static bfd_boolean
26765 arm_parse_fpu (const char * str)
26766 {
26767 const struct arm_option_fpu_value_table * opt;
26768
26769 for (opt = arm_fpus; opt->name != NULL; opt++)
26770 if (streq (opt->name, str))
26771 {
26772 mfpu_opt = &opt->value;
26773 return TRUE;
26774 }
26775
26776 as_bad (_("unknown floating point format `%s'\n"), str);
26777 return FALSE;
26778 }
26779
26780 static bfd_boolean
26781 arm_parse_float_abi (const char * str)
26782 {
26783 const struct arm_option_value_table * opt;
26784
26785 for (opt = arm_float_abis; opt->name != NULL; opt++)
26786 if (streq (opt->name, str))
26787 {
26788 mfloat_abi_opt = opt->value;
26789 return TRUE;
26790 }
26791
26792 as_bad (_("unknown floating point abi `%s'\n"), str);
26793 return FALSE;
26794 }
26795
26796 #ifdef OBJ_ELF
26797 static bfd_boolean
26798 arm_parse_eabi (const char * str)
26799 {
26800 const struct arm_option_value_table *opt;
26801
26802 for (opt = arm_eabis; opt->name != NULL; opt++)
26803 if (streq (opt->name, str))
26804 {
26805 meabi_flags = opt->value;
26806 return TRUE;
26807 }
26808 as_bad (_("unknown EABI `%s'\n"), str);
26809 return FALSE;
26810 }
26811 #endif
26812
26813 static bfd_boolean
26814 arm_parse_it_mode (const char * str)
26815 {
26816 bfd_boolean ret = TRUE;
26817
26818 if (streq ("arm", str))
26819 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26820 else if (streq ("thumb", str))
26821 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26822 else if (streq ("always", str))
26823 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26824 else if (streq ("never", str))
26825 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26826 else
26827 {
26828 as_bad (_("unknown implicit IT mode `%s', should be "\
26829 "arm, thumb, always, or never."), str);
26830 ret = FALSE;
26831 }
26832
26833 return ret;
26834 }
26835
26836 static bfd_boolean
26837 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26838 {
26839 codecomposer_syntax = TRUE;
26840 arm_comment_chars[0] = ';';
26841 arm_line_separator_chars[0] = 0;
26842 return TRUE;
26843 }
26844
/* Multi-character options handled by md_parse_option; each entry names the
   option, its --help text, and the sub-option parser to invoke.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}	/* Terminating entry.  */
};
26865
/* GAS backend hook: handle command-line option C with (possibly NULL)
   argument ARG.  Tries, in order: hard-coded cases, the simple ARM_OPTS
   table, the legacy ARM_LEGACY_OPTS table, then the ARM_LONG_OPTS table of
   sub-option parsers.  Returns 1 if the option was consumed, 0 if it was
   not recognized.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading option
		 character C that LOPT->OPTION includes, hence the -1.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26962
/* GAS backend hook: print ARM-specific command-line help to FP, covering
   the ARM_OPTS and ARM_LONG_OPTS tables plus the hard-coded options.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Entries with a NULL help string are undocumented on purpose.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
26997
26998 #ifdef OBJ_ELF
26999
/* Associates an EABI Tag_CPU_arch build attribute value (VAL) with the
   feature set (FLAGS) of the corresponding architecture.  */
typedef struct
{
  int val;			/* Tag_CPU_arch value; -1 terminates the table.  */
  arm_feature_set flags;	/* Feature set of the architecture.  */
} cpu_arch_ver_table;
27005
27006 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
27007 chronologically for architectures, with an exception for ARMv6-M and
27008 ARMv6S-M due to legacy reasons. No new architecture should have a
27009 special case. This allows for build attribute selection results to be
27010 stable when new architectures are added. */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    /* v8.4-A and v8.5-A were released after v8-M and v8-R, hence their
       position here despite sharing the TAG_CPU_ARCH_V8 value.  */
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {-1,		      ARM_ARCH_NONE}	/* Terminating entry.  */
};
27065
27066 /* Set an attribute if it has not already been set by the user. */
27067
27068 static void
27069 aeabi_set_attribute_int (int tag, int value)
27070 {
27071 if (tag < 1
27072 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27073 || !attributes_set_explicitly[tag])
27074 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
27075 }
27076
27077 static void
27078 aeabi_set_attribute_string (int tag, const char *value)
27079 {
27080 if (tag < 1
27081 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27082 || !attributes_set_explicitly[tag])
27083 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
27084 }
27085
27086 /* Return whether features in the *NEEDED feature set are available via
27087 extensions for the architecture whose feature set is *ARCH_FSET. */
27088
27089 static bfd_boolean
27090 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
27091 const arm_feature_set *needed)
27092 {
27093 int i, nb_allowed_archs;
27094 arm_feature_set ext_fset;
27095 const struct arm_option_extension_value_table *opt;
27096
27097 ext_fset = arm_arch_none;
27098 for (opt = arm_extensions; opt->name != NULL; opt++)
27099 {
27100 /* Extension does not provide any feature we need. */
27101 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
27102 continue;
27103
27104 nb_allowed_archs =
27105 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27106 for (i = 0; i < nb_allowed_archs; i++)
27107 {
27108 /* Empty entry. */
27109 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
27110 break;
27111
27112 /* Extension is available, add it. */
27113 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
27114 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
27115 }
27116 }
27117
27118 /* Can we enable all features in *needed? */
27119 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
27120 }
27121
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE ('A', 'R', 'M' or '\0').  Returns -1 if no suitable entry of
   cpu_arch_ver matches.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET = architecture features without the extension features.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk the table of known architecture versions.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Strip FPU feature bits from the table entry before comparing.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* P_VER_RET is non-NULL here; derive the profile from its feature
     flags.  */
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
27233
/* Set the public EABI object attributes.  Computes the feature set actually
   in use (from autodetection or from the user's CPU/arch selection) and
   emits the corresponding Tag_* build attributes, skipping any tag the user
   set explicitly.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Strip a leading "armv" and upper-case the remainder so that e.g.
	 "armv7-a" is reported as "7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  The tests run from the most capable FPU feature set
     downwards, so the first match wins.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security extensions; bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
27439
27440 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27441 finished and free extension feature bits which will not be used anymore. */
27442
27443 void
27444 arm_md_post_relax (void)
27445 {
27446 aeabi_set_public_attributes ();
27447 XDELETE (mcpu_ext_opt);
27448 mcpu_ext_opt = NULL;
27449 XDELETE (march_ext_opt);
27450 march_ext_opt = NULL;
27451 }
27452
27453 /* Add the default contents for the .ARM.attributes section. */
27454
27455 void
27456 arm_md_end (void)
27457 {
27458 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27459 return;
27460
27461 aeabi_set_public_attributes ();
27462 }
27463 #endif /* OBJ_ELF */
27464
27465 /* Parse a .cpu directive. */
27466
27467 static void
27468 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
27469 {
27470 const struct arm_cpu_option_table *opt;
27471 char *name;
27472 char saved_char;
27473
27474 name = input_line_pointer;
27475 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27476 input_line_pointer++;
27477 saved_char = *input_line_pointer;
27478 *input_line_pointer = 0;
27479
27480 /* Skip the first "all" entry. */
27481 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
27482 if (streq (opt->name, name))
27483 {
27484 selected_arch = opt->value;
27485 selected_ext = opt->ext;
27486 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27487 if (opt->canonical_name)
27488 strcpy (selected_cpu_name, opt->canonical_name);
27489 else
27490 {
27491 int i;
27492 for (i = 0; opt->name[i]; i++)
27493 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27494
27495 selected_cpu_name[i] = 0;
27496 }
27497 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27498
27499 *input_line_pointer = saved_char;
27500 demand_empty_rest_of_line ();
27501 return;
27502 }
27503 as_bad (_("unknown cpu `%s'"), name);
27504 *input_line_pointer = saved_char;
27505 ignore_rest_of_line ();
27506 }
27507
27508 /* Parse a .arch directive. */
27509
27510 static void
27511 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27512 {
27513 const struct arm_arch_option_table *opt;
27514 char saved_char;
27515 char *name;
27516
27517 name = input_line_pointer;
27518 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27519 input_line_pointer++;
27520 saved_char = *input_line_pointer;
27521 *input_line_pointer = 0;
27522
27523 /* Skip the first "all" entry. */
27524 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27525 if (streq (opt->name, name))
27526 {
27527 selected_arch = opt->value;
27528 selected_ext = arm_arch_none;
27529 selected_cpu = selected_arch;
27530 strcpy (selected_cpu_name, opt->name);
27531 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27532 *input_line_pointer = saved_char;
27533 demand_empty_rest_of_line ();
27534 return;
27535 }
27536
27537 as_bad (_("unknown architecture `%s'\n"), name);
27538 *input_line_pointer = saved_char;
27539 ignore_rest_of_line ();
27540 }
27541
27542 /* Parse a .object_arch directive. */
27543
27544 static void
27545 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27546 {
27547 const struct arm_arch_option_table *opt;
27548 char saved_char;
27549 char *name;
27550
27551 name = input_line_pointer;
27552 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27553 input_line_pointer++;
27554 saved_char = *input_line_pointer;
27555 *input_line_pointer = 0;
27556
27557 /* Skip the first "all" entry. */
27558 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27559 if (streq (opt->name, name))
27560 {
27561 selected_object_arch = opt->value;
27562 *input_line_pointer = saved_char;
27563 demand_empty_rest_of_line ();
27564 return;
27565 }
27566
27567 as_bad (_("unknown architecture `%s'\n"), name);
27568 *input_line_pointer = saved_char;
27569 ignore_rest_of_line ();
27570 }
27571
27572 /* Parse a .arch_extension directive. */
27573
27574 static void
27575 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27576 {
27577 const struct arm_option_extension_value_table *opt;
27578 char saved_char;
27579 char *name;
27580 int adding_value = 1;
27581
27582 name = input_line_pointer;
27583 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27584 input_line_pointer++;
27585 saved_char = *input_line_pointer;
27586 *input_line_pointer = 0;
27587
27588 if (strlen (name) >= 2
27589 && strncmp (name, "no", 2) == 0)
27590 {
27591 adding_value = 0;
27592 name += 2;
27593 }
27594
27595 for (opt = arm_extensions; opt->name != NULL; opt++)
27596 if (streq (opt->name, name))
27597 {
27598 int i, nb_allowed_archs =
27599 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27600 for (i = 0; i < nb_allowed_archs; i++)
27601 {
27602 /* Empty entry. */
27603 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
27604 continue;
27605 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
27606 break;
27607 }
27608
27609 if (i == nb_allowed_archs)
27610 {
27611 as_bad (_("architectural extension `%s' is not allowed for the "
27612 "current base architecture"), name);
27613 break;
27614 }
27615
27616 if (adding_value)
27617 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
27618 opt->merge_value);
27619 else
27620 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
27621
27622 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27623 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27624 *input_line_pointer = saved_char;
27625 demand_empty_rest_of_line ();
27626 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27627 on this return so that duplicate extensions (extensions with the
27628 same name as a previous extension in the list) are not considered
27629 for command-line parsing. */
27630 return;
27631 }
27632
27633 if (opt->name == NULL)
27634 as_bad (_("unknown architecture extension `%s'\n"), name);
27635
27636 *input_line_pointer = saved_char;
27637 ignore_rest_of_line ();
27638 }
27639
27640 /* Parse a .fpu directive. */
27641
27642 static void
27643 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27644 {
27645 const struct arm_option_fpu_value_table *opt;
27646 char saved_char;
27647 char *name;
27648
27649 name = input_line_pointer;
27650 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27651 input_line_pointer++;
27652 saved_char = *input_line_pointer;
27653 *input_line_pointer = 0;
27654
27655 for (opt = arm_fpus; opt->name != NULL; opt++)
27656 if (streq (opt->name, name))
27657 {
27658 selected_fpu = opt->value;
27659 #ifndef CPU_DEFAULT
27660 if (no_cpu_selected ())
27661 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
27662 else
27663 #endif
27664 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27665 *input_line_pointer = saved_char;
27666 demand_empty_rest_of_line ();
27667 return;
27668 }
27669
27670 as_bad (_("unknown floating point format `%s'\n"), name);
27671 *input_line_pointer = saved_char;
27672 ignore_rest_of_line ();
27673 }
27674
/* Copy symbol information.  Propagates the ARM-specific symbol flag word
   (accessed via ARM_GET_FLAG) from SRC to DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27682
27683 #ifdef OBJ_ELF
27684 /* Given a symbolic attribute NAME, return the proper integer value.
27685 Returns -1 if the attribute is not known. */
27686
27687 int
27688 arm_convert_symbolic_attribute (const char *name)
27689 {
27690 static const struct
27691 {
27692 const char * name;
27693 const int tag;
27694 }
27695 attribute_table[] =
27696 {
27697 /* When you modify this table you should
27698 also modify the list in doc/c-arm.texi. */
27699 #define T(tag) {#tag, tag}
27700 T (Tag_CPU_raw_name),
27701 T (Tag_CPU_name),
27702 T (Tag_CPU_arch),
27703 T (Tag_CPU_arch_profile),
27704 T (Tag_ARM_ISA_use),
27705 T (Tag_THUMB_ISA_use),
27706 T (Tag_FP_arch),
27707 T (Tag_VFP_arch),
27708 T (Tag_WMMX_arch),
27709 T (Tag_Advanced_SIMD_arch),
27710 T (Tag_PCS_config),
27711 T (Tag_ABI_PCS_R9_use),
27712 T (Tag_ABI_PCS_RW_data),
27713 T (Tag_ABI_PCS_RO_data),
27714 T (Tag_ABI_PCS_GOT_use),
27715 T (Tag_ABI_PCS_wchar_t),
27716 T (Tag_ABI_FP_rounding),
27717 T (Tag_ABI_FP_denormal),
27718 T (Tag_ABI_FP_exceptions),
27719 T (Tag_ABI_FP_user_exceptions),
27720 T (Tag_ABI_FP_number_model),
27721 T (Tag_ABI_align_needed),
27722 T (Tag_ABI_align8_needed),
27723 T (Tag_ABI_align_preserved),
27724 T (Tag_ABI_align8_preserved),
27725 T (Tag_ABI_enum_size),
27726 T (Tag_ABI_HardFP_use),
27727 T (Tag_ABI_VFP_args),
27728 T (Tag_ABI_WMMX_args),
27729 T (Tag_ABI_optimization_goals),
27730 T (Tag_ABI_FP_optimization_goals),
27731 T (Tag_compatibility),
27732 T (Tag_CPU_unaligned_access),
27733 T (Tag_FP_HP_extension),
27734 T (Tag_VFP_HP_extension),
27735 T (Tag_ABI_FP_16bit_format),
27736 T (Tag_MPextension_use),
27737 T (Tag_DIV_use),
27738 T (Tag_nodefaults),
27739 T (Tag_also_compatible_with),
27740 T (Tag_conformance),
27741 T (Tag_T2EE_use),
27742 T (Tag_Virtualization_use),
27743 T (Tag_DSP_extension),
27744 /* We deliberately do not include Tag_MPextension_use_legacy. */
27745 #undef T
27746 };
27747 unsigned int i;
27748
27749 if (name == NULL)
27750 return -1;
27751
27752 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
27753 if (streq (name, attribute_table[i].name))
27754 return attribute_table[i].tag;
27755
27756 return -1;
27757 }
27758
27759 /* Apply sym value for relocations only in the case that they are for
27760 local symbols in the same segment as the fixup and you have the
27761 respective architectural feature for blx and simple switches. */
27762
27763 int
27764 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27765 {
27766 if (fixP->fx_addsy
27767 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27768 /* PR 17444: If the local symbol is in a different section then a reloc
27769 will always be generated for it, so applying the symbol value now
27770 will result in a double offset being stored in the relocation. */
27771 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27772 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27773 {
27774 switch (fixP->fx_r_type)
27775 {
27776 case BFD_RELOC_ARM_PCREL_BLX:
27777 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27778 if (ARM_IS_FUNC (fixP->fx_addsy))
27779 return 1;
27780 break;
27781
27782 case BFD_RELOC_ARM_PCREL_CALL:
27783 case BFD_RELOC_THUMB_PCREL_BLX:
27784 if (THUMB_IS_FUNC (fixP->fx_addsy))
27785 return 1;
27786 break;
27787
27788 default:
27789 break;
27790 }
27791
27792 }
27793 return 0;
27794 }
27795 #endif /* OBJ_ELF */
This page took 0.876628 seconds and 5 git commands to generate.