[ARM] Allow Thumb division as an extension for ARMv7
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
  /* Start-of-function symbol for the region currently being unwound;
     presumably set by .fnstart and cleared at .fnend -- confirm against
     the directive handlers.  */
51 symbolS * proc_start;
  /* Symbol for this function's entry in the unwind index table
     (NOTE(review): inferred from the name -- confirm).  */
52 symbolS * table_entry;
  /* Personality routine symbol when one is named explicitly.  */
53 symbolS * personality_routine;
  /* Pre-defined personality index; sentinel for "none" not visible in
     this chunk -- confirm.  */
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
  /* Number of opcode bytes used, and the allocated capacity of the
     OPCODES buffer (grown in ARM_OPCODE_CHUNK_SIZE steps, presumably).  */
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static arm_feature_set *dyn_mcpu_ext_opt = NULL;
151 static const arm_feature_set *mcpu_fpu_opt = NULL;
152 static const arm_feature_set *march_cpu_opt = NULL;
153 static arm_feature_set *dyn_march_ext_opt = NULL;
154 static const arm_feature_set *march_fpu_opt = NULL;
155 static const arm_feature_set *mfpu_opt = NULL;
156 static const arm_feature_set *object_arch = NULL;
157
158 /* Constants for known architecture features. */
159 static const arm_feature_set fpu_default = FPU_DEFAULT;
160 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
161 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
162 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
163 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
164 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
165 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
166 #ifdef OBJ_ELF
167 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
168 #endif
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
191 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
192 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
193 static const arm_feature_set arm_ext_v6_notm =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
195 static const arm_feature_set arm_ext_v6_dsp =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
197 static const arm_feature_set arm_ext_barrier =
198 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
199 static const arm_feature_set arm_ext_msr =
200 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
201 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
202 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
203 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
204 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
205 #ifdef OBJ_ELF
206 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
207 #endif
208 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
209 static const arm_feature_set arm_ext_m =
210 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
211 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
212 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
213 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
214 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
215 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
216 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
217 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
218 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
219 static const arm_feature_set arm_ext_v8m_main =
220 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
221 /* Instructions in ARMv8-M only found in M profile architectures. */
222 static const arm_feature_set arm_ext_v8m_m_only =
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
224 static const arm_feature_set arm_ext_v6t2_v8m =
225 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
226 /* Instructions shared between ARMv8-A and ARMv8-M. */
227 static const arm_feature_set arm_ext_atomics =
228 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
229 #ifdef OBJ_ELF
230 /* DSP instructions Tag_DSP_extension refers to. */
231 static const arm_feature_set arm_ext_dsp =
232 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
233 #endif
234 static const arm_feature_set arm_ext_ras =
235 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
236 /* FP16 instructions. */
237 static const arm_feature_set arm_ext_fp16 =
238 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
239 static const arm_feature_set arm_ext_v8_3 =
240 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
241
242 static const arm_feature_set arm_arch_any = ARM_ANY;
243 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
244 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
245 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
246 #ifdef OBJ_ELF
247 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
248 #endif
249
250 static const arm_feature_set arm_cext_iwmmxt2 =
251 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
252 static const arm_feature_set arm_cext_iwmmxt =
253 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
254 static const arm_feature_set arm_cext_xscale =
255 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
256 static const arm_feature_set arm_cext_maverick =
257 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
258 static const arm_feature_set fpu_fpa_ext_v1 =
259 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
260 static const arm_feature_set fpu_fpa_ext_v2 =
261 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
262 static const arm_feature_set fpu_vfp_ext_v1xd =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
264 static const arm_feature_set fpu_vfp_ext_v1 =
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
266 static const arm_feature_set fpu_vfp_ext_v2 =
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
268 static const arm_feature_set fpu_vfp_ext_v3xd =
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
270 static const arm_feature_set fpu_vfp_ext_v3 =
271 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
272 static const arm_feature_set fpu_vfp_ext_d32 =
273 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
274 static const arm_feature_set fpu_neon_ext_v1 =
275 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
276 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
277 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
278 #ifdef OBJ_ELF
279 static const arm_feature_set fpu_vfp_fp16 =
280 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
281 static const arm_feature_set fpu_neon_ext_fma =
282 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
283 #endif
284 static const arm_feature_set fpu_vfp_ext_fma =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
286 static const arm_feature_set fpu_vfp_ext_armv8 =
287 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
288 static const arm_feature_set fpu_vfp_ext_armv8xd =
289 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
290 static const arm_feature_set fpu_neon_ext_armv8 =
291 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
292 static const arm_feature_set fpu_crypto_ext_armv8 =
293 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
294 static const arm_feature_set crc_ext_armv8 =
295 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
296 static const arm_feature_set fpu_neon_ext_v8_1 =
297 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
298
299 static int mfloat_abi_opt = -1;
300 /* Record user cpu selection for object attributes. */
301 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
302 /* Must be long enough to hold any of the names in arm_cpus. */
303 static char selected_cpu_name[20];
304
305 extern FLONUM_TYPE generic_floating_point_number;
306
307 /* Return if no cpu was selected on command-line. */
308 static bfd_boolean
309 no_cpu_selected (void)
310 {
311 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
312 }
313
314 #ifdef OBJ_ELF
315 # ifdef EABI_DEFAULT
316 static int meabi_flags = EABI_DEFAULT;
317 # else
318 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
319 # endif
320
321 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
322
323 bfd_boolean
324 arm_is_eabi (void)
325 {
326 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
327 }
328 #endif
329
330 #ifdef OBJ_ELF
331 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
332 symbolS * GOT_symbol;
333 #endif
334
335 /* 0: assemble for ARM,
336 1: assemble for Thumb,
337 2: assemble for Thumb even though target CPU does not support thumb
338 instructions. */
339 static int thumb_mode = 0;
340 /* A value distinct from the possible values for thumb_mode that we
341 can use to record whether thumb_mode has been copied into the
342 tc_frag_data field of a frag. */
343 #define MODE_RECORDED (1 << 4)
344
345 /* Specifies the intrinsic IT insn behavior mode. */
346 enum implicit_it_mode
347 {
348 IMPLICIT_IT_MODE_NEVER = 0x00,
349 IMPLICIT_IT_MODE_ARM = 0x01,
350 IMPLICIT_IT_MODE_THUMB = 0x02,
351 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
352 };
353 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
354
355 /* If unified_syntax is true, we are processing the new unified
356 ARM/Thumb syntax. Important differences from the old ARM mode:
357
358 - Immediate operands do not require a # prefix.
359 - Conditional affixes always appear at the end of the
360 instruction. (For backward compatibility, those instructions
361 that formerly had them in the middle, continue to accept them
362 there.)
363 - The IT instruction may appear, and if it does is validated
364 against subsequent conditional affixes. It does not generate
365 machine code.
366
367 Important differences from the old Thumb mode:
368
369 - Immediate operands do not require a # prefix.
370 - Most of the V6T2 instructions are only available in unified mode.
371 - The .N and .W suffixes are recognized and honored (it is an error
372 if they cannot be honored).
373 - All instructions set the flags if and only if they have an 's' affix.
374 - Conditional affixes may be used. They are validated against
375 preceding IT instructions. Unlike ARM mode, you cannot use a
376 conditional affix except in the scope of an IT instruction. */
377
378 static bfd_boolean unified_syntax = FALSE;
379
380 /* An immediate operand can start with #, and ld*, st*, pld operands
381 can contain [ and ]. We need to tell APP not to elide whitespace
382 before a [, which can appear as the first operand for pld.
383 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
384 const char arm_symbol_chars[] = "#[]{}";
385
386 enum neon_el_type
387 {
388 NT_invtype,
389 NT_untyped,
390 NT_integer,
391 NT_float,
392 NT_poly,
393 NT_signed,
394 NT_unsigned
395 };
396
397 struct neon_type_el
398 {
399 enum neon_el_type type;
400 unsigned size;
401 };
402
403 #define NEON_MAX_TYPE_ELS 4
404
405 struct neon_type
406 {
407 struct neon_type_el el[NEON_MAX_TYPE_ELS];
408 unsigned elems;
409 };
410
411 enum it_instruction_type
412 {
413 OUTSIDE_IT_INSN,
414 INSIDE_IT_INSN,
415 INSIDE_IT_LAST_INSN,
416 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
417 if inside, should be the last one. */
418 NEUTRAL_IT_INSN, /* This could be either inside or outside,
419 i.e. BKPT and NOP. */
420 IT_INSN /* The IT insn has been parsed. */
421 };
422
423 /* The maximum number of operands we need. */
424 #define ARM_IT_MAX_OPERANDS 6
425
426 struct arm_it
427 {
  /* Diagnostic for the current instruction, or NULL when none --
     typically one of the BAD_* messages; confirm against the setters.  */
428 const char * error;
  /* The binary encoding being built up for the instruction.  */
429 unsigned long instruction;
  /* Encoded size in bytes, and the size explicitly requested by a
     .n/.w suffix (NOTE(review): exact "no request" sentinel not visible
     here -- confirm).  */
430 int size;
431 int size_req;
  /* Condition code; COND_ALWAYS (0xE) when the instruction is
     unconditional (see conditional_insn).  */
432 int cond;
433 /* "uncond_value" is set to the value in place of the conditional field in
434 unconditional versions of the instruction, or -1 if nothing is
435 appropriate. */
436 int uncond_value;
437 struct neon_type vectype;
438 /* This does not indicate an actual NEON instruction, only that
439 the mnemonic accepts neon-style type suffixes. */
440 int is_neon;
441 /* Set to the opcode if the instruction needs relaxation.
442 Zero if the instruction is not relaxed. */
443 unsigned long relax;
  /* Relocation to be emitted for this instruction, if any.  */
444 struct
445 {
446 bfd_reloc_code_real_type type;
447 expressionS exp;
448 int pc_rel;
449 } reloc;
450
451 enum it_instruction_type it_insn_type;
452
  /* Parsed operands, at most ARM_IT_MAX_OPERANDS of them.  */
453 struct
454 {
455 unsigned reg;
456 signed int imm;
457 struct neon_type_el vectype;
458 unsigned present : 1; /* Operand present. */
459 unsigned isreg : 1; /* Operand was a register. */
460 unsigned immisreg : 1; /* .imm field is a second register. */
461 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
462 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
463 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
464 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
465 instructions. This allows us to disambiguate ARM <-> vector insns. */
466 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
467 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
468 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
469 unsigned issingle : 1; /* Operand is VFP single-precision register. */
470 unsigned hasreloc : 1; /* Operand has relocation suffix. */
471 unsigned writeback : 1; /* Operand has trailing ! */
472 unsigned preind : 1; /* Preindexed address. */
473 unsigned postind : 1; /* Postindexed address. */
474 unsigned negative : 1; /* Index register was negated. */
475 unsigned shifted : 1; /* Shift applied to operation. */
476 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
477 } operands[ARM_IT_MAX_OPERANDS];
478 };
479
480 static struct arm_it inst;
481
482 #define NUM_FLOAT_VALS 8
483
484 const char * fp_const[] =
485 {
486 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
487 };
488
489 /* Number of littlenums required to hold an extended precision number. */
490 #define MAX_LITTLENUMS 6
491
492 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
493
494 #define FAIL (-1)
495 #define SUCCESS (0)
496
497 #define SUFF_S 1
498 #define SUFF_D 2
499 #define SUFF_E 3
500 #define SUFF_P 4
501
502 #define CP_T_X 0x00008000
503 #define CP_T_Y 0x00400000
504
505 #define CONDS_BIT 0x00100000
506 #define LOAD_BIT 0x00100000
507
508 #define DOUBLE_LOAD_FLAG 0x00000001
509
510 struct asm_cond
511 {
512 const char * template_name;
513 unsigned long value;
514 };
515
516 #define COND_ALWAYS 0xE
517
518 struct asm_psr
519 {
520 const char * template_name;
521 unsigned long field;
522 };
523
524 struct asm_barrier_opt
525 {
526 const char * template_name;
527 unsigned long value;
528 const arm_feature_set arch;
529 };
530
531 /* The bit that distinguishes CPSR and SPSR. */
532 #define SPSR_BIT (1 << 22)
533
534 /* The individual PSR flag bits. */
535 #define PSR_c (1 << 16)
536 #define PSR_x (1 << 17)
537 #define PSR_s (1 << 18)
538 #define PSR_f (1 << 19)
539
540 struct reloc_entry
541 {
542 const char * name;
543 bfd_reloc_code_real_type reloc;
544 };
545
546 enum vfp_reg_pos
547 {
548 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
549 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
550 };
551
552 enum vfp_ldstm_type
553 {
554 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
555 };
556
557 /* Bits for DEFINED field in neon_typed_alias. */
558 #define NTA_HASTYPE 1
559 #define NTA_HASINDEX 2
560
561 struct neon_typed_alias
562 {
563 unsigned char defined;
564 unsigned char index;
565 struct neon_type_el eltype;
566 };
567
568 /* ARM register categories. This includes coprocessor numbers and various
569 architecture extensions' registers. */
570 enum arm_reg_type
571 {
572 REG_TYPE_RN,
573 REG_TYPE_CP,
574 REG_TYPE_CN,
575 REG_TYPE_FN,
576 REG_TYPE_VFS,
577 REG_TYPE_VFD,
578 REG_TYPE_NQ,
579 REG_TYPE_VFSD,
580 REG_TYPE_NDQ,
581 REG_TYPE_NSDQ,
582 REG_TYPE_VFC,
583 REG_TYPE_MVF,
584 REG_TYPE_MVD,
585 REG_TYPE_MVFX,
586 REG_TYPE_MVDX,
587 REG_TYPE_MVAX,
588 REG_TYPE_DSPSC,
589 REG_TYPE_MMXWR,
590 REG_TYPE_MMXWC,
591 REG_TYPE_MMXWCG,
592 REG_TYPE_XSCALE,
593 REG_TYPE_RNB
594 };
595
596 /* Structure for a hash table entry for a register.
597 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
598 information which states whether a vector type or index is specified (for a
599 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
600 struct reg_entry
601 {
602 const char * name;
603 unsigned int number;
604 unsigned char type;
605 unsigned char builtin;
606 struct neon_typed_alias * neon;
607 };
608
609 /* Diagnostics used when we don't get a register of the expected type. */
  /* Indexed by enum arm_reg_type -- the entries below must stay in the
     same order as that enum (REG_TYPE_RN first, REG_TYPE_XSCALE last).
     NOTE(review): there is no entry for the final enumerator
     REG_TYPE_RNB; confirm no diagnostic is ever requested for it.  */
610 const char * const reg_expected_msgs[] =
611 {
612 N_("ARM register expected"),
613 N_("bad or missing co-processor number"),
614 N_("co-processor register expected"),
615 N_("FPA register expected"),
616 N_("VFP single precision register expected"),
617 N_("VFP/Neon double precision register expected"),
618 N_("Neon quad precision register expected"),
619 N_("VFP single or double precision register expected"),
620 N_("Neon double or quad precision register expected"),
621 N_("VFP single, double or Neon quad precision register expected"),
622 N_("VFP system register expected"),
623 N_("Maverick MVF register expected"),
624 N_("Maverick MVD register expected"),
625 N_("Maverick MVFX register expected"),
626 N_("Maverick MVDX register expected"),
627 N_("Maverick MVAX register expected"),
628 N_("Maverick DSPSC register expected"),
629 N_("iWMMXt data register expected"),
630 N_("iWMMXt control register expected"),
631 N_("iWMMXt scalar register expected"),
632 N_("XScale accumulator register expected"),
633 };
634
635 /* Some well known registers that we refer to directly elsewhere. */
636 #define REG_R12 12
637 #define REG_SP 13
638 #define REG_LR 14
639 #define REG_PC 15
640
641 /* ARM instructions take 4bytes in the object file, Thumb instructions
642 take 2: */
643 #define INSN_SIZE 4
644
645 struct asm_opcode
646 {
647 /* Basic string to match. */
648 const char * template_name;
649
650 /* Parameters to instruction. */
651 unsigned int operands[8];
652
653 /* Conditional tag - see opcode_lookup. */
654 unsigned int tag : 4;
655
656 /* Basic instruction code. */
657 unsigned int avalue : 28;
658
659 /* Thumb-format instruction code. */
660 unsigned int tvalue;
661
662 /* Which architecture variant provides this instruction. */
663 const arm_feature_set * avariant;
664 const arm_feature_set * tvariant;
665
666 /* Function to call to encode instruction in ARM format. */
667 void (* aencode) (void);
668
669 /* Function to call to encode instruction in Thumb format. */
670 void (* tencode) (void);
671 };
672
673 /* Defines for various bits that we will want to toggle. */
674 #define INST_IMMEDIATE 0x02000000
675 #define OFFSET_REG 0x02000000
676 #define HWOFFSET_IMM 0x00400000
677 #define SHIFT_BY_REG 0x00000010
678 #define PRE_INDEX 0x01000000
679 #define INDEX_UP 0x00800000
680 #define WRITE_BACK 0x00200000
681 #define LDM_TYPE_2_OR_3 0x00400000
682 #define CPSI_MMOD 0x00020000
683
684 #define LITERAL_MASK 0xf000f000
685 #define OPCODE_MASK 0xfe1fffff
686 #define V4_STR_BIT 0x00000020
687 #define VLDR_VMOV_SAME 0x0040f000
688
689 #define T2_SUBS_PC_LR 0xf3de8f00
690
691 #define DATA_OP_SHIFT 21
692 #define SBIT_SHIFT 20
693
694 #define T2_OPCODE_MASK 0xfe1fffff
695 #define T2_DATA_OP_SHIFT 21
696 #define T2_SBIT_SHIFT 20
697
698 #define A_COND_MASK 0xf0000000
699 #define A_PUSH_POP_OP_MASK 0x0fff0000
700
701 /* Opcodes for pushing/poping registers to/from the stack. */
702 #define A1_OPCODE_PUSH 0x092d0000
703 #define A2_OPCODE_PUSH 0x052d0004
704 #define A2_OPCODE_POP 0x049d0004
705
706 /* Codes to distinguish the arithmetic instructions. */
707 #define OPCODE_AND 0
708 #define OPCODE_EOR 1
709 #define OPCODE_SUB 2
710 #define OPCODE_RSB 3
711 #define OPCODE_ADD 4
712 #define OPCODE_ADC 5
713 #define OPCODE_SBC 6
714 #define OPCODE_RSC 7
715 #define OPCODE_TST 8
716 #define OPCODE_TEQ 9
717 #define OPCODE_CMP 10
718 #define OPCODE_CMN 11
719 #define OPCODE_ORR 12
720 #define OPCODE_MOV 13
721 #define OPCODE_BIC 14
722 #define OPCODE_MVN 15
723
724 #define T2_OPCODE_AND 0
725 #define T2_OPCODE_BIC 1
726 #define T2_OPCODE_ORR 2
727 #define T2_OPCODE_ORN 3
728 #define T2_OPCODE_EOR 4
729 #define T2_OPCODE_ADD 8
730 #define T2_OPCODE_ADC 10
731 #define T2_OPCODE_SBC 11
732 #define T2_OPCODE_SUB 13
733 #define T2_OPCODE_RSB 14
734
735 #define T_OPCODE_MUL 0x4340
736 #define T_OPCODE_TST 0x4200
737 #define T_OPCODE_CMN 0x42c0
738 #define T_OPCODE_NEG 0x4240
739 #define T_OPCODE_MVN 0x43c0
740
741 #define T_OPCODE_ADD_R3 0x1800
742 #define T_OPCODE_SUB_R3 0x1a00
743 #define T_OPCODE_ADD_HI 0x4400
744 #define T_OPCODE_ADD_ST 0xb000
745 #define T_OPCODE_SUB_ST 0xb080
746 #define T_OPCODE_ADD_SP 0xa800
747 #define T_OPCODE_ADD_PC 0xa000
748 #define T_OPCODE_ADD_I8 0x3000
749 #define T_OPCODE_SUB_I8 0x3800
750 #define T_OPCODE_ADD_I3 0x1c00
751 #define T_OPCODE_SUB_I3 0x1e00
752
753 #define T_OPCODE_ASR_R 0x4100
754 #define T_OPCODE_LSL_R 0x4080
755 #define T_OPCODE_LSR_R 0x40c0
756 #define T_OPCODE_ROR_R 0x41c0
757 #define T_OPCODE_ASR_I 0x1000
758 #define T_OPCODE_LSL_I 0x0000
759 #define T_OPCODE_LSR_I 0x0800
760
761 #define T_OPCODE_MOV_I8 0x2000
762 #define T_OPCODE_CMP_I8 0x2800
763 #define T_OPCODE_CMP_LR 0x4280
764 #define T_OPCODE_MOV_HR 0x4600
765 #define T_OPCODE_CMP_HR 0x4500
766
767 #define T_OPCODE_LDR_PC 0x4800
768 #define T_OPCODE_LDR_SP 0x9800
769 #define T_OPCODE_STR_SP 0x9000
770 #define T_OPCODE_LDR_IW 0x6800
771 #define T_OPCODE_STR_IW 0x6000
772 #define T_OPCODE_LDR_IH 0x8800
773 #define T_OPCODE_STR_IH 0x8000
774 #define T_OPCODE_LDR_IB 0x7800
775 #define T_OPCODE_STR_IB 0x7000
776 #define T_OPCODE_LDR_RW 0x5800
777 #define T_OPCODE_STR_RW 0x5000
778 #define T_OPCODE_LDR_RH 0x5a00
779 #define T_OPCODE_STR_RH 0x5200
780 #define T_OPCODE_LDR_RB 0x5c00
781 #define T_OPCODE_STR_RB 0x5400
782
783 #define T_OPCODE_PUSH 0xb400
784 #define T_OPCODE_POP 0xbc00
785
786 #define T_OPCODE_BRANCH 0xe000
787
788 #define THUMB_SIZE 2 /* Size of thumb instruction. */
789 #define THUMB_PP_PC_LR 0x0100
790 #define THUMB_LOAD_BIT 0x0800
791 #define THUMB2_LOAD_BIT 0x00100000
792
  /* Canned diagnostic messages assigned to inst.error (presumably --
     assignment sites are outside this chunk).  */
793 #define BAD_ARGS _("bad arguments to instruction")
794 #define BAD_SP _("r13 not allowed here")
795 #define BAD_PC _("r15 not allowed here")
796 #define BAD_COND _("instruction cannot be conditional")
797 #define BAD_OVERLAP _("registers may not be the same")
798 #define BAD_HIREG _("lo register required")
799 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
800 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode"); /* NOTE(review): stray trailing semicolon -- the expansion includes it, so this macro is only safe where an extra empty statement is harmless; verify use sites before removing.  */
801 #define BAD_BRANCH _("branch must be last instruction in IT block")
802 #define BAD_NOT_IT _("instruction not allowed in IT block")
803 #define BAD_FPU _("selected FPU does not support instruction")
804 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
805 #define BAD_IT_COND _("incorrect condition in IT block")
806 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
807 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
808 #define BAD_PC_ADDRESSING \
809 _("cannot use register index with PC-relative addressing")
810 #define BAD_PC_WRITEBACK \
811 _("cannot use writeback with PC-relative addressing")
812 #define BAD_RANGE _("branch out of range")
813 #define BAD_FP16 _("selected processor does not support fp16 instruction")
814 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
815 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
816
817 static struct hash_control * arm_ops_hsh;
818 static struct hash_control * arm_cond_hsh;
819 static struct hash_control * arm_shift_hsh;
820 static struct hash_control * arm_psr_hsh;
821 static struct hash_control * arm_v7m_psr_hsh;
822 static struct hash_control * arm_reg_hsh;
823 static struct hash_control * arm_reloc_hsh;
824 static struct hash_control * arm_barrier_opt_hsh;
825
826 /* Stuff needed to resolve the label ambiguity
827 As:
828 ...
829 label: <insn>
830 may differ from:
831 ...
832 label:
833 <insn> */
834
835 symbolS * last_label_seen;
836 static int label_is_thumb_function_name = FALSE;
837
838 /* Literal pool structure. Held on a per-section
839 and per-sub-section basis. */
840
841 #define MAX_LITERAL_POOL_SIZE 1024
842 typedef struct literal_pool
843 {
  /* Constants queued for emission into this pool.  */
844 expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the next unused slot in LITERALS.  */
845 unsigned int next_free_entry;
846 unsigned int id;
  /* Label placed at the pool, presumably the target of PC-relative
     loads -- confirm at the emission site.  */
847 symbolS * symbol;
848 segT section;
849 subsegT sub_section;
850 #ifdef OBJ_ELF
  /* Debug line info recorded per literal, parallel to LITERALS.  */
851 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
852 #endif
  /* Next pool in the list headed by list_of_pools.  */
853 struct literal_pool * next;
854 unsigned int alignment;
855 } literal_pool;
856
857 /* Pointer to a linked list of literal pools. */
858 literal_pool * list_of_pools = NULL;
859
860 typedef enum asmfunc_states
861 {
862 OUTSIDE_ASMFUNC,
863 WAITING_ASMFUNC_NAME,
864 WAITING_ENDASMFUNC
865 } asmfunc_states;
866
867 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
868
869 #ifdef OBJ_ELF
870 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
871 #else
872 static struct current_it now_it;
873 #endif
874
875 static inline int
876 now_it_compatible (int cond)
877 {
878 return (cond & ~1) == (now_it.cc & ~1);
879 }
880
881 static inline int
882 conditional_insn (void)
883 {
884 return inst.cond != COND_ALWAYS;
885 }
886
887 static int in_it_block (void);
888
889 static int handle_it_state (void);
890
891 static void force_automatic_it_block_close (void);
892
893 static void it_fsm_post_encode (void);
894
895 #define set_it_insn_type(type) \
896 do \
897 { \
898 inst.it_insn_type = type; \
899 if (handle_it_state () == FAIL) \
900 return; \
901 } \
902 while (0)
903
904 #define set_it_insn_type_nonvoid(type, failret) \
905 do \
906 { \
907 inst.it_insn_type = type; \
908 if (handle_it_state () == FAIL) \
909 return failret; \
910 } \
911 while(0)
912
913 #define set_it_insn_type_last() \
914 do \
915 { \
916 if (inst.cond == COND_ALWAYS) \
917 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
918 else \
919 set_it_insn_type (INSIDE_IT_LAST_INSN); \
920 } \
921 while (0)
922
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Characters which terminate one statement and start another on the same
   physical line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* The input scrubber collapses runs of whitespace to a single space
   (see the note in create_register_alias below), so skipping one space
   is sufficient.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
957
958 static inline int
959 skip_past_char (char ** str, char c)
960 {
961 /* PR gas/14987: Allow for whitespace before the expected character. */
962 skip_whitespace (*str);
963
964 if (**str == c)
965 {
966 (*str)++;
967 return SUCCESS;
968 }
969 else
970 return FAIL;
971 }
972
/* Consume an optional comma (with optional preceding whitespace).  */
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */
978
979 static int
980 walk_no_bignums (symbolS * sp)
981 {
982 if (symbol_get_value_expression (sp)->X_op == O_big)
983 return 1;
984
985 if (symbol_get_value_expression (sp)->X_add_symbol)
986 {
987 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
988 || (symbol_get_value_expression (sp)->X_op_symbol
989 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
990 }
991
992 return 0;
993 }
994
/* Non-zero while expression () is being run on behalf of
   my_get_expression; md_operand uses this to know the context.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* The prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1004
/* Parse an expression at *STR into EP, handling an optional or required
   immediate prefix according to PREFIX_MODE (one of the GE_* values).
   On success return 0 and advance *STR past the expression; on failure
   return non-zero and set inst.error.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  /* NOTE(review): this path returns FAIL (-1) while the other
	     failure paths return 1; callers only test for non-zero, so
	     both work, but the mix is inconsistent.  */
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Redirect the generic expression parser at our string.  md_operand
     checks in_my_get_expression to know it was called from here.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1090
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.	However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Number of 16-bit littlenums for each supported format letter.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;		/* Single precision.  */
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;		/* Double precision.  */
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: emit the littlenums in natural order.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FP: reverse all littlenums.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1180
1181 /* We handle all bad expressions here, so that we can report the faulty
1182 instruction in the error message. */
1183 void
1184 md_operand (expressionS * exp)
1185 {
1186 if (in_my_get_expression)
1187 exp->X_op = O_illegal;
1188 }
1189
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
#ifdef OBJ_ELF
static int
immediate_for_directive (int *val)
{
  expressionS exp;

  exp.X_op = O_illegal;
  if (is_immediate_prefix (*input_line_pointer))
    {
      ++input_line_pointer;
      expression (&exp);
    }

  if (exp.X_op == O_constant)
    {
      *val = exp.X_add_number;
      return SUCCESS;
    }

  as_bad (_("expected #constant"));
  ignore_rest_of_line ();
  return FAIL;
}
#endif
1218
1219 /* Register parsing. */
1220
1221 /* Generic register parser. CCP points to what should be the
1222 beginning of a register name. If it is indeed a valid register
1223 name, advance CCP over it and return the reg_entry structure;
1224 otherwise return NULL. Does not issue diagnostics. */
1225
1226 static struct reg_entry *
1227 arm_reg_parse_multi (char **ccp)
1228 {
1229 char *start = *ccp;
1230 char *p;
1231 struct reg_entry *reg;
1232
1233 skip_whitespace (start);
1234
1235 #ifdef REGISTER_PREFIX
1236 if (*start != REGISTER_PREFIX)
1237 return NULL;
1238 start++;
1239 #endif
1240 #ifdef OPTIONAL_REGISTER_PREFIX
1241 if (*start == OPTIONAL_REGISTER_PREFIX)
1242 start++;
1243 #endif
1244
1245 p = start;
1246 if (!ISALPHA (*p) || !is_name_beginner (*p))
1247 return NULL;
1248
1249 do
1250 p++;
1251 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1252
1253 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1254
1255 if (!reg)
1256 return NULL;
1257
1258 *ccp = p;
1259 return reg;
1260 }
1261
1262 static int
1263 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1264 enum arm_reg_type type)
1265 {
1266 /* Alternative syntaxes are accepted for a few register classes. */
1267 switch (type)
1268 {
1269 case REG_TYPE_MVF:
1270 case REG_TYPE_MVD:
1271 case REG_TYPE_MVFX:
1272 case REG_TYPE_MVDX:
1273 /* Generic coprocessor register names are allowed for these. */
1274 if (reg && reg->type == REG_TYPE_CN)
1275 return reg->number;
1276 break;
1277
1278 case REG_TYPE_CP:
1279 /* For backward compatibility, a bare number is valid here. */
1280 {
1281 unsigned long processor = strtoul (start, ccp, 10);
1282 if (*ccp != start && processor <= 15)
1283 return processor;
1284 }
1285 /* Fall through. */
1286
1287 case REG_TYPE_MMXWC:
1288 /* WC includes WCG. ??? I'm not sure this is true for all
1289 instructions that take WC registers. */
1290 if (reg && reg->type == REG_TYPE_MMXWCG)
1291 return reg->number;
1292 break;
1293
1294 default:
1295 break;
1296 }
1297
1298 return FAIL;
1299 }
1300
1301 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1302 return value is the register number or FAIL. */
1303
1304 static int
1305 arm_reg_parse (char **ccp, enum arm_reg_type type)
1306 {
1307 char *start = *ccp;
1308 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1309 int ret;
1310
1311 /* Do not allow a scalar (reg+index) to parse as a register. */
1312 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1313 return FAIL;
1314
1315 if (reg && reg->type == type)
1316 return reg->number;
1317
1318 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1319 return ret;
1320
1321 *ccp = start;
1322 return FAIL;
1323 }
1324
1325 /* Parse a Neon type specifier. *STR should point at the leading '.'
1326 character. Does no verification at this stage that the type fits the opcode
1327 properly. E.g.,
1328
1329 .i32.i32.s16
1330 .s32.f32
1331 .u16
1332
1333 Can all be legally parsed by this function.
1334
1335 Fills in neon_type struct pointer with parsed information, and updates STR
1336 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1337 type, FAIL if not. */
1338
1339 static int
1340 parse_neon_type (struct neon_type *type, char **str)
1341 {
1342 char *ptr = *str;
1343
1344 if (type)
1345 type->elems = 0;
1346
1347 while (type->elems < NEON_MAX_TYPE_ELS)
1348 {
1349 enum neon_el_type thistype = NT_untyped;
1350 unsigned thissize = -1u;
1351
1352 if (*ptr != '.')
1353 break;
1354
1355 ptr++;
1356
1357 /* Just a size without an explicit type. */
1358 if (ISDIGIT (*ptr))
1359 goto parsesize;
1360
1361 switch (TOLOWER (*ptr))
1362 {
1363 case 'i': thistype = NT_integer; break;
1364 case 'f': thistype = NT_float; break;
1365 case 'p': thistype = NT_poly; break;
1366 case 's': thistype = NT_signed; break;
1367 case 'u': thistype = NT_unsigned; break;
1368 case 'd':
1369 thistype = NT_float;
1370 thissize = 64;
1371 ptr++;
1372 goto done;
1373 default:
1374 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1375 return FAIL;
1376 }
1377
1378 ptr++;
1379
1380 /* .f is an abbreviation for .f32. */
1381 if (thistype == NT_float && !ISDIGIT (*ptr))
1382 thissize = 32;
1383 else
1384 {
1385 parsesize:
1386 thissize = strtoul (ptr, &ptr, 10);
1387
1388 if (thissize != 8 && thissize != 16 && thissize != 32
1389 && thissize != 64)
1390 {
1391 as_bad (_("bad size %d in type specifier"), thissize);
1392 return FAIL;
1393 }
1394 }
1395
1396 done:
1397 if (type)
1398 {
1399 type->el[type->elems].type = thistype;
1400 type->el[type->elems].size = thissize;
1401 type->elems++;
1402 }
1403 }
1404
1405 /* Empty/missing type is not a successful parse. */
1406 if (type->elems == 0)
1407 return FAIL;
1408
1409 *str = ptr;
1410
1411 return SUCCESS;
1412 }
1413
1414 /* Errors may be set multiple times during parsing or bit encoding
1415 (particularly in the Neon bits), but usually the earliest error which is set
1416 will be the most meaningful. Avoid overwriting it with later (cascading)
1417 errors by calling this function. */
1418
1419 static void
1420 first_error (const char *err)
1421 {
1422 if (!inst.error)
1423 inst.error = err;
1424 }
1425
1426 /* Parse a single type, e.g. ".s32", leading period included. */
1427 static int
1428 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1429 {
1430 char *str = *ccp;
1431 struct neon_type optype;
1432
1433 if (*str == '.')
1434 {
1435 if (parse_neon_type (&optype, &str) == SUCCESS)
1436 {
1437 if (optype.elems == 1)
1438 *vectype = optype.el[0];
1439 else
1440 {
1441 first_error (_("only one type should be specified for operand"));
1442 return FAIL;
1443 }
1444 }
1445 else
1446 {
1447 first_error (_("vector type expected"));
1448 return FAIL;
1449 }
1450 }
1451 else
1452 return FAIL;
1453
1454 *ccp = str;
1455
1456 return SUCCESS;
1457 }
1458
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]": operate on every lane.  */
#define NEON_INTERLEAVE_LANES	14	/* Structure list without an index.  */
1464
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with neither a type nor an index recorded.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A register alias may carry predefined type/index information
     (see insert_neon_reg_alias below).  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may not re-type an alias that already
     has a type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix (D registers only).  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* An empty "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1573
1574 /* Like arm_reg_parse, but allow allow the following extra features:
1575 - If RTYPE is non-zero, return the (possibly restricted) type of the
1576 register (e.g. Neon double or quad reg when either has been requested).
1577 - If this is a Neon vector type with additional type information, fill
1578 in the struct pointed to by VECTYPE (if non-NULL).
1579 This function will fault on encountering a scalar. */
1580
1581 static int
1582 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1583 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1584 {
1585 struct neon_typed_alias atype;
1586 char *str = *ccp;
1587 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1588
1589 if (reg == FAIL)
1590 return FAIL;
1591
1592 /* Do not allow regname(... to parse as a register. */
1593 if (*str == '(')
1594 return FAIL;
1595
1596 /* Do not allow a scalar (reg+index) to parse as a register. */
1597 if ((atype.defined & NTA_HASINDEX) != 0)
1598 {
1599 first_error (_("register operand expected, but got scalar"));
1600 return FAIL;
1601 }
1602
1603 if (vectype)
1604 *vectype = atype.eltype;
1605
1606 *ccp = str;
1607
1608 return reg;
1609 }
1610
/* Decompose the value returned by parse_scalar below: register number
   in the high bits, lane index in the low four bits.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1613
1614 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1615 have enough information to be able to do a good job bounds-checking. So, we
1616 just do easy checks here, and do further checks later. */
1617
1618 static int
1619 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1620 {
1621 int reg;
1622 char *str = *ccp;
1623 struct neon_typed_alias atype;
1624
1625 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1626
1627 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1628 return FAIL;
1629
1630 if (atype.index == NEON_ALL_LANES)
1631 {
1632 first_error (_("scalar must have an index"));
1633 return FAIL;
1634 }
1635 else if (atype.index >= 64 / elsize)
1636 {
1637 first_error (_("scalar index out of range"));
1638 return FAIL;
1639 }
1640
1641 if (type)
1642 *type = atype.eltype;
1643
1644 *ccp = str;
1645
1646 return reg * 16 + atype.index;
1647 }
1648
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */

static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long range = 0;
  int another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  /* Fill in the registers between the two range
		     endpoints, warning about any duplicates.  */
		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* The comma expression sets IN_RANGE when the separator is '-';
	     *str++ always advances, so the str-- below undoes the
	     overshoot when the loop exits.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      /* A bare constant is accepted as a literal register mask;
		 it must fit in the low 16 bits.  */
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  /* NOTE(review): this isolates the lowest duplicated
		     bit but then computes (1 << bit-value) - 1, which
		     does not look like a register number; the warning
		     may print a bogus r%d value — verify intent.  */
		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      /* A symbolic mask is deferred to a relocation.  */
	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1776
/* Types of registers in a list (see parse_vfp_reg_list).  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* D registers, with Neon syntax extensions.  */
};
1785
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register type; the D-register bound is computed below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record use of the D16-D31 extension in the per-mode
	     architecture-used feature sets.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper endpoint covers two D registers.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register within the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;	/* NOTE(review): skips what is assumed to be the closing
		   '}'; a missing brace is not diagnosed here.  */

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1964
1965 /* True if two alias types are the same. */
1966
1967 static bfd_boolean
1968 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1969 {
1970 if (!a && !b)
1971 return TRUE;
1972
1973 if (!a || !b)
1974 return FALSE;
1975
1976 if (a->defined != b->defined)
1977 return FALSE;
1978
1979 if ((a->defined & NTA_HASTYPE) != 0
1980 && (a->eltype.type != b->eltype.type
1981 || a->eltype.size != b->eltype.size))
1982 return FALSE;
1983
1984 if ((a->defined & NTA_HASINDEX) != 0
1985 && (a->index != b->index))
1986 return FALSE;
1987
1988 return TRUE;
1989 }
1990
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* Register stride; -1 until determined.  */
  int count = 0;	/* Number of D registers seen so far.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and (for Q regs) the stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: determines the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the arithmetic series.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count all D registers covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* An indexed element: all entries must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2159
2160 /* Parse an explicit relocation suffix on an expression. This is
2161 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2162 arm_reloc_hsh contains no entries, so this function can only
2163 succeed if there is no () after the word. Returns -1 on error,
2164 BFD_RELOC_UNUSED if there wasn't any suffix. */
2165
2166 static int
2167 parse_reloc (char **str)
2168 {
2169 struct reloc_entry *r;
2170 char *p, *q;
2171
2172 if (**str != '(')
2173 return BFD_RELOC_UNUSED;
2174
2175 p = *str + 1;
2176 q = p;
2177
2178 while (*q && *q != ')' && *q != ',')
2179 q++;
2180 if (*q != ')')
2181 return -1;
2182
2183 if ((r = (struct reloc_entry *)
2184 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2185 return -1;
2186
2187 *str = q + 1;
2188 return r->reloc;
2189 }
2190
2191 /* Directives: register aliases. */
2192
2193 static struct reg_entry *
2194 insert_reg_alias (char *str, unsigned number, int type)
2195 {
2196 struct reg_entry *new_reg;
2197 const char *name;
2198
2199 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2200 {
2201 if (new_reg->builtin)
2202 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2203
2204 /* Only warn about a redefinition if it's not defined as the
2205 same register. */
2206 else if (new_reg->number != number || new_reg->type != type)
2207 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2208
2209 return NULL;
2210 }
2211
2212 name = xstrdup (str);
2213 new_reg = XNEW (struct reg_entry);
2214
2215 new_reg->name = name;
2216 new_reg->number = number;
2217 new_reg->type = type;
2218 new_reg->builtin = FALSE;
2219 new_reg->neon = NULL;
2220
2221 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2222 abort ();
2223
2224 return new_reg;
2225 }
2226
2227 static void
2228 insert_neon_reg_alias (char *str, int number, int type,
2229 struct neon_typed_alias *atype)
2230 {
2231 struct reg_entry *reg = insert_reg_alias (str, number, type);
2232
2233 if (!reg)
2234 {
2235 first_error (_("attempt to redefine typed alias"));
2236 return;
2237 }
2238
2239 if (atype)
2240 {
2241 reg->neon = XNEW (struct neon_typed_alias);
2242 *reg->neon = *atype;
2243 }
2244 }
2245
2246 /* Look for the .req directive. This is of the form:
2247
2248 new_register_name .req existing_register_name
2249
2250 If we find one, or if it looks sufficiently like one that we want to
2251 handle any error here, return TRUE. Otherwise return FALSE. */
2252
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an already-known register.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Reuse nbuf in place for the upper-case variant; only insert it
	 if it actually differs from the name as written.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Likewise for the all-lower-case variant.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2325
2326 /* Create a Neon typed/indexed register alias using directives, e.g.:
2327 X .dn d5.s32[1]
2328 Y .qn 6.s16
2329 Z .dn d7
2330 T .dn Z[0]
2331 These typed registers can be used instead of the types specified after the
2332 Neon mnemonic, so long as all operands given have types. Types can also be
2333 specified directly, e.g.:
2334 vadd d0.s32, d1.s32, d2.s32 */
2335
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded for the alias.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn creates a D-register alias, .qn a Q-register alias; anything
     else means this line is not for us.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry on the stack; a Q register number is
	 stored as twice the D register number.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type/index info.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As for .req: insert the alias as written, plus all-upper-case and
     all-lower-case variants when they differ from the original.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2474
2475 /* Should never be called, as .req goes between the alias and the
2476 register name, not at the beginning of the line. */
2477
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is only valid between an alias name and a register name (see
     create_register_alias); dispatching here means it began the
     statement, which is a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2483
/* Like .req, .dn must follow the alias name being defined (see
   create_neon_reg_alias); a leading .dn is a syntax error.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2489
/* Like .req, .qn must follow the alias name being defined (see
   create_neon_reg_alias); a leading .qn is a syntax error.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2495
2496 /* The .unreq directive deletes an alias which was previously defined
2497 by .req. For example:
2498
2499 my_alias .req r11
2500 .unreq my_alias */
2501
2502 static void
2503 s_unreq (int a ATTRIBUTE_UNUSED)
2504 {
2505 char * name;
2506 char saved_char;
2507
2508 name = input_line_pointer;
2509
2510 while (*input_line_pointer != 0
2511 && *input_line_pointer != ' '
2512 && *input_line_pointer != '\n')
2513 ++input_line_pointer;
2514
2515 saved_char = *input_line_pointer;
2516 *input_line_pointer = 0;
2517
2518 if (!*name)
2519 as_bad (_("invalid syntax for .unreq directive"));
2520 else
2521 {
2522 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2523 name);
2524
2525 if (!reg)
2526 as_bad (_("unknown register alias '%s'"), name);
2527 else if (reg->builtin)
2528 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2529 name);
2530 else
2531 {
2532 char * p;
2533 char * nbuf;
2534
2535 hash_delete (arm_reg_hsh, name, FALSE);
2536 free ((char *) reg->name);
2537 if (reg->neon)
2538 free (reg->neon);
2539 free (reg);
2540
2541 /* Also locate the all upper case and all lower case versions.
2542 Do not complain if we cannot find one or the other as it
2543 was probably deleted above. */
2544
2545 nbuf = strdup (name);
2546 for (p = nbuf; *p; p++)
2547 *p = TOUPPER (*p);
2548 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2549 if (reg)
2550 {
2551 hash_delete (arm_reg_hsh, nbuf, FALSE);
2552 free ((char *) reg->name);
2553 if (reg->neon)
2554 free (reg->neon);
2555 free (reg);
2556 }
2557
2558 for (p = nbuf; *p; p++)
2559 *p = TOLOWER (*p);
2560 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2561 if (reg)
2562 {
2563 hash_delete (arm_reg_hsh, nbuf, FALSE);
2564 free ((char *) reg->name);
2565 if (reg->neon)
2566 free (reg->neon);
2567 free (reg);
2568 }
2569
2570 free (nbuf);
2571 }
2572 }
2573
2574 *input_line_pointer = saved_char;
2575 demand_empty_rest_of_line ();
2576 }
2577
2578 /* Directives: Instruction set selection. */
2579
2580 #ifdef OBJ_ELF
2581 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2582 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2583 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2584 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2585
2586 /* Create a new mapping symbol for the transition to STATE. */
2587
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping symbol name; all three are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mirror the ARM/Thumb state onto the symbol for interworking.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2661
2662 /* We must sometimes convert a region marked as code to data during
2663 code alignment, if an odd number of bytes have to be padded. The
2664 code mapping symbol is pushed to an aligned address. */
2665
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* A symbol at offset 0 is also the frag's first mapping symbol;
	 clear that record too before removing it.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume the code state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2688
2689 static void mapping_state_2 (enum mstate state, int max_chars);
2690
2691 /* Set the mapping state to STATE. Only call this when about to
2692 emit some STATE bytes to the file. */
2693
2694 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to me marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current position (no bytes have
     been allocated yet, hence max_chars == 0).  */
  mapping_state_2 (state, 0);
}
2727
2728 /* Same as mapping_state, but MAX_CHARS bytes have already been
2729 allocated. Put the mapping symbol that far back. */
2730
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only tracked in normal sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* First transition out of the undefined state: if anything was
     already emitted into this section, it must have been data, so
     retroactively mark the section start with $d.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and place its symbol MAX_CHARS bytes back,
     i.e. at the start of the bytes already allocated by the caller.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2757 #undef TRANSITION
2758 #else
2759 #define mapping_state(x) ((void)0)
2760 #define mapping_state_2(x, y) ((void)0)
2761 #endif
2762
2763 /* Find the real, Thumb encoded start of a Thumb function. */
2764
2765 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look for a "<STUB_NAME><name>" symbol emitted by the compiler.  */
  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  /* Fall back to the original symbol when no stub symbol exists.  */
  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2799 #endif
2800
2801 static void
2802 opcode_select (int width)
2803 {
2804 switch (width)
2805 {
2806 case 16:
2807 if (! thumb_mode)
2808 {
2809 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2810 as_bad (_("selected processor does not support THUMB opcodes"));
2811
2812 thumb_mode = 1;
2813 /* No need to force the alignment, since we will have been
2814 coming from ARM mode, which is word-aligned. */
2815 record_alignment (now_seg, 1);
2816 }
2817 break;
2818
2819 case 32:
2820 if (thumb_mode)
2821 {
2822 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2823 as_bad (_("selected processor does not support ARM opcodes"));
2824
2825 thumb_mode = 0;
2826
2827 if (!need_pass_2)
2828 frag_align (2, 0, 0);
2829
2830 record_alignment (now_seg, 1);
2831 }
2832 break;
2833
2834 default:
2835 as_bad (_("invalid instruction size selected (%d)"), width);
2836 }
2837 }
2838
/* Implement the ".arm" directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2845
/* Implement the ".thumb" directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2852
2853 static void
2854 s_code (int unused ATTRIBUTE_UNUSED)
2855 {
2856 int temp;
2857
2858 temp = get_absolute_expression ();
2859 switch (temp)
2860 {
2861 case 16:
2862 case 32:
2863 opcode_select (temp);
2864 break;
2865
2866 default:
2867 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2868 }
2869 }
2870
/* Implement the ".force_thumb" directive.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (not 1) marks the mode as forced, bypassing the feature
	 check that opcode_select would perform.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2887
/* Implement the ".thumb_func" directive: switch to Thumb mode and flag
   the next label as a Thumb function entry point.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2897
2898 /* Perform a .set directive, but also mark the alias as
2899 being a thumb function. */
2900
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name so it prints cleanly, then
	 put the delimiter back before skipping the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the symbol, or create it if it does not yet exist.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Undo the NUL-termination of the name.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse and assign the value expression, as s_set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2986
2987 /* Directives: Mode selection. */
2988
2989 /* .syntax [unified|divided] - choose the new unified syntax
2990 (same for Arm and Thumb encoding, modulo slight differences in what
2991 can be represented) or the old divergent syntax for each mode. */
2992 static void
2993 s_syntax (int unused ATTRIBUTE_UNUSED)
2994 {
2995 char *name, delim;
2996
2997 delim = get_symbol_name (& name);
2998
2999 if (!strcasecmp (name, "unified"))
3000 unified_syntax = TRUE;
3001 else if (!strcasecmp (name, "divided"))
3002 unified_syntax = FALSE;
3003 else
3004 {
3005 as_bad (_("unrecognized syntax mode \"%s\""), name);
3006 return;
3007 }
3008 (void) restore_line_pointer (delim);
3009 demand_empty_rest_of_line ();
3010 }
3011
3012 /* Directives: sectioning and alignment. */
3013
/* Implement the ".bss" directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3026
/* Implement the ".even" directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3038
3039 /* Directives: CodeComposer Studio. */
3040
3041 /* .ref (for CodeComposer Studio syntax only). */
3042 static void
3043 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3044 {
3045 if (codecomposer_syntax)
3046 ignore_rest_of_line ();
3047 else
3048 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3049 }
3050
3051 /* If name is not NULL, then it is used for marking the beginning of a
3052 function, whereas if it is NULL then it means the function end. */
3053 static void
3054 asmfunc_debug (const char * name)
3055 {
3056 static const char * last_name = NULL;
3057
3058 if (name != NULL)
3059 {
3060 gas_assert (last_name == NULL);
3061 last_name = name;
3062
3063 if (debug_type == DEBUG_STABS)
3064 stabs_generate_asm_func (name, name);
3065 }
3066 else
3067 {
3068 gas_assert (last_name != NULL);
3069
3070 if (debug_type == DEBUG_STABS)
3071 stabs_generate_asm_endfunc (last_name, last_name);
3072
3073 last_name = NULL;
3074 }
3075 }
3076
/* Implement the ".asmfunc" directive (CodeComposer Studio syntax):
   start waiting for the label that names the function.  */

static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3101
/* Implement the ".endasmfunc" directive (CodeComposer Studio syntax):
   close the function opened by .asmfunc + label.  */

static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  /* NULL marks the function end (see asmfunc_debug).  */
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3127
3128 static void
3129 s_ccs_def (int name)
3130 {
3131 if (codecomposer_syntax)
3132 s_globl (name);
3133 else
3134 as_bad (_(".def pseudo-op only available with -mccs flag."));
3135 }
3136
3137 /* Directives: Literal pools. */
3138
3139 static literal_pool *
3140 find_literal_pool (void)
3141 {
3142 literal_pool * pool;
3143
3144 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3145 {
3146 if (pool->section == now_seg
3147 && pool->sub_section == now_subseg)
3148 break;
3149 }
3150
3151 return pool;
3152 }
3153
3154 static literal_pool *
3155 find_or_make_literal_pool (void)
3156 {
3157 /* Next literal pool ID number. */
3158 static unsigned int latest_pool_num = 1;
3159 literal_pool * pool;
3160
3161 pool = find_literal_pool ();
3162
3163 if (pool == NULL)
3164 {
3165 /* Create a new pool. */
3166 pool = XNEW (literal_pool);
3167 if (! pool)
3168 return NULL;
3169
3170 pool->next_free_entry = 0;
3171 pool->section = now_seg;
3172 pool->sub_section = now_subseg;
3173 pool->next = list_of_pools;
3174 pool->symbol = NULL;
3175 pool->alignment = 2;
3176
3177 /* Add it to the list. */
3178 list_of_pools = pool;
3179 }
3180
3181 /* New pools, and emptied pools, will have a NULL symbol. */
3182 if (pool->symbol == NULL)
3183 {
3184 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3185 (valueT) 0, &zero_address_frag);
3186 pool->id = latest_pool_num ++;
3187 }
3188
3189 /* Done. */
3190 return pool;
3191 }
3192
3193 /* Add the literal in the global 'inst'
3194 structure to the relevant literal pool. */
3195
static int
add_to_lit_pool (unsigned int nbytes)
{
  /* X_md encodes the entry size in the low byte and flags above it;
     PADDING_SLOT marks a 4-byte filler inserted for 8-byte alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* Split a 64-bit literal into two 32-bit words, low word first
     (swapped for big-endian targets).  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.reloc.exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal occupies two consecutive, 8-byte-aligned
	 4-byte entries.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal may recycle a padding slot in place.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Emit a zero padding slot to reach 8-byte alignment.  */
	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the padding slot with the real literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as pool_symbol + offset; the
     fix-up machinery resolves it once the pool is emitted.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3357
3358 bfd_boolean
3359 tc_start_label_without_colon (void)
3360 {
3361 bfd_boolean ret = TRUE;
3362
3363 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3364 {
3365 const char *label = input_line_pointer;
3366
3367 while (!is_end_of_line[(int) label[-1]])
3368 --label;
3369
3370 if (*label == '.')
3371 {
3372 as_bad (_("Invalid label '%s'"), label);
3373 ret = FALSE;
3374 }
3375
3376 asmfunc_debug (label);
3377
3378 asmfunc_state = WAITING_ENDASMFUNC;
3379 }
3380
3381 return ret;
3382 }
3383
3384 /* Can't use symbol_new here, so have to create a symbol and then at
3385 a later date assign it a value. That's what these functions do. */
3386
3387 static void
3388 symbol_locate (symbolS * symbolP,
3389 const char * name, /* It is copied, the caller can modify. */
3390 segT segment, /* Segment identifier (SEG_<something>). */
3391 valueT valu, /* Symbol value. */
3392 fragS * frag) /* Associated fragment. */
3393 {
3394 size_t name_length;
3395 char * preserved_copy_of_name;
3396
3397 name_length = strlen (name) + 1; /* +1 for \0. */
3398 obstack_grow (&notes, name, name_length);
3399 preserved_copy_of_name = (char *) obstack_finish (&notes);
3400
3401 #ifdef tc_canonicalize_symbol_name
3402 preserved_copy_of_name =
3403 tc_canonicalize_symbol_name (preserved_copy_of_name);
3404 #endif
3405
3406 S_SET_NAME (symbolP, preserved_copy_of_name);
3407
3408 S_SET_SEGMENT (symbolP, segment);
3409 S_SET_VALUE (symbolP, valu);
3410 symbol_clear_list_pointers (symbolP);
3411
3412 symbol_set_frag (symbolP, frag);
3413
3414 /* Link to end of symbol chain. */
3415 {
3416 extern int symbol_table_frozen;
3417
3418 if (symbol_table_frozen)
3419 abort ();
3420 }
3421
3422 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3423
3424 obj_symbol_new_hook (symbolP);
3425
3426 #ifdef tc_symbol_new_hook
3427 tc_symbol_new_hook (symbolP);
3428 #endif
3429
3430 #ifdef DEBUG_SYMS
3431 verify_symbol_chain (symbol_rootP, symbol_lastP);
3432 #endif /* DEBUG_SYMS */
3433 }
3434
/* Implement the .ltorg/.pool directive: dump the pending literal pool
   into the output at the current location, then reset the pool.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool, or it has already been dumped
     or never received an entry.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data needs a $d mapping symbol so disassemblers know it is
     not code.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated name out of the user namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's placeholder symbol its final location, now known.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  /* Emit every entry; the low bits of X_md encode the entry size.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3486
3487 #ifdef OBJ_ELF
3488 /* Forward declarations for functions below, in the MD interface
3489 section. */
3490 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3491 static valueT create_unwind_entry (int);
3492 static void start_unwind_section (const segT, int);
3493 static void add_unwind_opcode (valueT, int);
3494 static void flush_pending_unwind (void);
3495
3496 /* Directives: Data. */
3497
/* Implement .word/.long for ELF targets, accepting an optional
   parenthesised relocation suffix after a symbol (e.g. "foo(got)").
   NBYTES is the size of each emitted datum.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Emitted data needs a $d mapping symbol.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  /* Look for a relocation suffix following the symbol.  */
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the "(reloc)" text out of the input line so
		     the expression parser can see the full expression,
		     then restore the line afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Reserve NBYTES of zeroes and attach a SIZE-byte
		     fixup occupying the last SIZE bytes of the datum.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3590
3591 /* Emit an expression containing a 32-bit thumb instruction.
3592 Implementation based on put_thumb32_insn. */
3593
3594 static void
3595 emit_thumb32_expr (expressionS * exp)
3596 {
3597 expressionS exp_high = *exp;
3598
3599 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3600 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3601 exp->X_add_number &= 0xffff;
3602 emit_expr (exp, (unsigned int) THUMB_SIZE);
3603 }
3604
3605 /* Guess the instruction size based on the opcode. */
3606
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit encoding, 4 for a 32-bit encoding, and 0 when the value falls
   in neither encoding space.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int raw = (unsigned int) opcode;

  if (raw < 0xe800u)
    return 2;
  if (raw >= 0xe8000000u)
    return 4;
  return 0;
}
3617
/* Emit the constant in EXP as a single instruction of NBYTES bytes
   (for the .inst family of directives).  NBYTES of 0 means deduce the
   size from the opcode value.  Returns TRUE on success, FALSE after
   reporting an error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width given: guess from the opcode's value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A .inst.n operand must fit in a halfword.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block tracking state machine consistent
		 with this hand-emitted instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are emitted one halfword at
		 a time on little-endian targets (see
		 emit_thumb32_expr).  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3662
3663 /* Like s_arm_elf_cons but do not use md_cons_align and
3664 set the mapping state to MAP_ARM/MAP_THUMB. */
3665
3666 static void
3667 s_arm_elf_inst (int nbytes)
3668 {
3669 if (is_it_end_of_statement ())
3670 {
3671 demand_empty_rest_of_line ();
3672 return;
3673 }
3674
3675 /* Calling mapping_state () here will not change ARM/THUMB,
3676 but will ensure not to be in DATA state. */
3677
3678 if (thumb_mode)
3679 mapping_state (MAP_THUMB);
3680 else
3681 {
3682 if (nbytes != 0)
3683 {
3684 as_bad (_("width suffixes are invalid in ARM mode"));
3685 ignore_rest_of_line ();
3686 return;
3687 }
3688
3689 nbytes = 4;
3690
3691 mapping_state (MAP_ARM);
3692 }
3693
3694 do
3695 {
3696 expressionS exp;
3697
3698 expression (& exp);
3699
3700 if (! emit_insn (& exp, nbytes))
3701 {
3702 ignore_rest_of_line ();
3703 return;
3704 }
3705 }
3706 while (*input_line_pointer++ == ',');
3707
3708 /* Put terminator back into stream. */
3709 input_line_pointer --;
3710 demand_empty_rest_of_line ();
3711 }
3712
3713 /* Parse a .rel31 directive. */
3714
/* Parse a .rel31 directive: a flag (0 or 1) selecting bit 31, a comma,
   then an expression emitted as a 4-byte datum carrying a
   BFD_RELOC_ARM_PREL31 pc-relative fixup in the low 31 bits.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* The first argument chooses the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Emitted data needs a $d mapping symbol.  */
  mapping_state (MAP_DATA);

  expression (&exp);

  /* Write the high bit now; the pc-relative PREL31 fixup fills in the
     remaining 31 bits later.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3752
3753 /* Directives: AEABI stack-unwind tables. */
3754
3755 /* Parse an unwind_fnstart directive. Simply records the current location. */
3756
3757 static void
3758 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3759 {
3760 demand_empty_rest_of_line ();
3761 if (unwind.proc_start)
3762 {
3763 as_bad (_("duplicate .fnstart directive"));
3764 return;
3765 }
3766
3767 /* Mark the start of the function. */
3768 unwind.proc_start = expr_build_dot ();
3769
3770 /* Reset the rest of the unwind info. */
3771 unwind.opcode_count = 0;
3772 unwind.table_entry = NULL;
3773 unwind.personality_routine = NULL;
3774 unwind.personality_index = -1;
3775 unwind.frame_size = 0;
3776 unwind.fp_offset = 0;
3777 unwind.fp_reg = REG_SP;
3778 unwind.fp_used = 0;
3779 unwind.sp_restored = 0;
3780 }
3781
3782
3783 /* Parse a handlerdata directive. Creates the exception handling table entry
3784 for the function. */
3785
3786 static void
3787 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3788 {
3789 demand_empty_rest_of_line ();
3790 if (!unwind.proc_start)
3791 as_bad (MISSING_FNSTART);
3792
3793 if (unwind.table_entry)
3794 as_bad (_("duplicate .handlerdata directive"));
3795
3796 create_unwind_entry (1);
3797 }
3798
3799 /* Parse an unwind_fnend directive. Generates the index table entry. */
3800
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the entry is small
     enough to be placed inline in the index table.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup records the reference.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3868
3869
3870 /* Parse an unwind_cantunwind directive. */
3871
3872 static void
3873 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3874 {
3875 demand_empty_rest_of_line ();
3876 if (!unwind.proc_start)
3877 as_bad (MISSING_FNSTART);
3878
3879 if (unwind.personality_routine || unwind.personality_index != -1)
3880 as_bad (_("personality routine specified for cantunwind frame"));
3881
3882 unwind.personality_index = -2;
3883 }
3884
3885
3886 /* Parse a personalityindex directive. */
3887
3888 static void
3889 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3890 {
3891 expressionS exp;
3892
3893 if (!unwind.proc_start)
3894 as_bad (MISSING_FNSTART);
3895
3896 if (unwind.personality_routine || unwind.personality_index != -1)
3897 as_bad (_("duplicate .personalityindex directive"));
3898
3899 expression (&exp);
3900
3901 if (exp.X_op != O_constant
3902 || exp.X_add_number < 0 || exp.X_add_number > 15)
3903 {
3904 as_bad (_("bad personality routine number"));
3905 ignore_rest_of_line ();
3906 return;
3907 }
3908
3909 unwind.personality_index = exp.X_add_number;
3910
3911 demand_empty_rest_of_line ();
3912 }
3913
3914
3915 /* Parse a personality directive. */
3916
3917 static void
3918 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3919 {
3920 char *name, *p, c;
3921
3922 if (!unwind.proc_start)
3923 as_bad (MISSING_FNSTART);
3924
3925 if (unwind.personality_routine || unwind.personality_index != -1)
3926 as_bad (_("duplicate .personality directive"));
3927
3928 c = get_symbol_name (& name);
3929 p = input_line_pointer;
3930 if (c == '"')
3931 ++ input_line_pointer;
3932 unwind.personality_routine = symbol_find_or_make (name);
3933 *p = c;
3934 demand_empty_rest_of_line ();
3935 }
3936
3937
3938 /* Parse a directive saving core registers. */
3939
/* Parse a directive saving core registers.  RANGE is a bitmask with
   bit N set when rN is in the parsed register list.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.
     Mask 0x3000 covers ip (bit 12) and sp (bit 13); 0x1000 means ip is
     present and sp absent.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4013
4014
4015 /* Parse a directive saving FPA registers. */
4016
4017 static void
4018 s_arm_unwind_save_fpa (int reg)
4019 {
4020 expressionS exp;
4021 int num_regs;
4022 valueT op;
4023
4024 /* Get Number of registers to transfer. */
4025 if (skip_past_comma (&input_line_pointer) != FAIL)
4026 expression (&exp);
4027 else
4028 exp.X_op = O_illegal;
4029
4030 if (exp.X_op != O_constant)
4031 {
4032 as_bad (_("expected , <constant>"));
4033 ignore_rest_of_line ();
4034 return;
4035 }
4036
4037 num_regs = exp.X_add_number;
4038
4039 if (num_regs < 1 || num_regs > 4)
4040 {
4041 as_bad (_("number of registers must be in the range [1:4]"));
4042 ignore_rest_of_line ();
4043 return;
4044 }
4045
4046 demand_empty_rest_of_line ();
4047
4048 if (reg == 4)
4049 {
4050 /* Short form. */
4051 op = 0xb4 | (num_regs - 1);
4052 add_unwind_opcode (op, 1);
4053 }
4054 else
4055 {
4056 /* Long form. */
4057 op = 0xc800 | (reg << 4) | (num_regs - 1);
4058 add_unwind_opcode (op, 2);
4059 }
4060 unwind.frame_size += num_regs * 12;
4061 }
4062
4063
4064 /* Parse a directive saving VFP registers for ARMv6 and above. */
4065
/* Parse a directive saving VFP registers for ARMv6 and above.  Splits
   the register list into a VFPv3 part (d16-d31) and a d0-d15 part,
   emitting one unwind opcode for each.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each double register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
4112
4113
4114 /* Parse a directive saving VFP registers for pre-ARMv6. */
4115
4116 static void
4117 s_arm_unwind_save_vfp (void)
4118 {
4119 int count;
4120 unsigned int reg;
4121 valueT op;
4122
4123 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4124 if (count == FAIL)
4125 {
4126 as_bad (_("expected register list"));
4127 ignore_rest_of_line ();
4128 return;
4129 }
4130
4131 demand_empty_rest_of_line ();
4132
4133 if (reg == 8)
4134 {
4135 /* Short form. */
4136 op = 0xb8 | (count - 1);
4137 add_unwind_opcode (op, 1);
4138 }
4139 else
4140 {
4141 /* Long form. */
4142 op = 0xb300 | (reg << 4) | (count - 1);
4143 add_unwind_opcode (op, 2);
4144 }
4145 unwind.frame_size += count * 8 + 4;
4146 }
4147
4148
4149 /* Parse a directive saving iWMMXt data registers. */
4150
4151 static void
4152 s_arm_unwind_save_mmxwr (void)
4153 {
4154 int reg;
4155 int hi_reg;
4156 int i;
4157 unsigned mask = 0;
4158 valueT op;
4159
4160 if (*input_line_pointer == '{')
4161 input_line_pointer++;
4162
4163 do
4164 {
4165 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4166
4167 if (reg == FAIL)
4168 {
4169 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4170 goto error;
4171 }
4172
4173 if (mask >> reg)
4174 as_tsktsk (_("register list not in ascending order"));
4175 mask |= 1 << reg;
4176
4177 if (*input_line_pointer == '-')
4178 {
4179 input_line_pointer++;
4180 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4181 if (hi_reg == FAIL)
4182 {
4183 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4184 goto error;
4185 }
4186 else if (reg >= hi_reg)
4187 {
4188 as_bad (_("bad register range"));
4189 goto error;
4190 }
4191 for (; reg < hi_reg; reg++)
4192 mask |= 1 << reg;
4193 }
4194 }
4195 while (skip_past_comma (&input_line_pointer) != FAIL);
4196
4197 skip_past_char (&input_line_pointer, '}');
4198
4199 demand_empty_rest_of_line ();
4200
4201 /* Generate any deferred opcodes because we're going to be looking at
4202 the list. */
4203 flush_pending_unwind ();
4204
4205 for (i = 0; i < 16; i++)
4206 {
4207 if (mask & (1 << i))
4208 unwind.frame_size += 8;
4209 }
4210
4211 /* Attempt to combine with a previous opcode. We do this because gcc
4212 likes to output separate unwind directives for a single block of
4213 registers. */
4214 if (unwind.opcode_count > 0)
4215 {
4216 i = unwind.opcodes[unwind.opcode_count - 1];
4217 if ((i & 0xf8) == 0xc0)
4218 {
4219 i &= 7;
4220 /* Only merge if the blocks are contiguous. */
4221 if (i < 6)
4222 {
4223 if ((mask & 0xfe00) == (1 << 9))
4224 {
4225 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4226 unwind.opcode_count--;
4227 }
4228 }
4229 else if (i == 6 && unwind.opcode_count >= 2)
4230 {
4231 i = unwind.opcodes[unwind.opcode_count - 2];
4232 reg = i >> 4;
4233 i &= 0xf;
4234
4235 op = 0xffff << (reg - 1);
4236 if (reg > 0
4237 && ((mask & op) == (1u << (reg - 1))))
4238 {
4239 op = (1 << (reg + i + 1)) - 1;
4240 op &= ~((1 << reg) - 1);
4241 mask |= op;
4242 unwind.opcode_count -= 2;
4243 }
4244 }
4245 }
4246 }
4247
4248 hi_reg = 15;
4249 /* We want to generate opcodes in the order the registers have been
4250 saved, ie. descending order. */
4251 for (reg = 15; reg >= -1; reg--)
4252 {
4253 /* Save registers in blocks. */
4254 if (reg < 0
4255 || !(mask & (1 << reg)))
4256 {
4257 /* We found an unsaved reg. Generate opcodes to save the
4258 preceding block. */
4259 if (reg != hi_reg)
4260 {
4261 if (reg == 9)
4262 {
4263 /* Short form. */
4264 op = 0xc0 | (hi_reg - 10);
4265 add_unwind_opcode (op, 1);
4266 }
4267 else
4268 {
4269 /* Long form. */
4270 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4271 add_unwind_opcode (op, 2);
4272 }
4273 }
4274 hi_reg = reg - 1;
4275 }
4276 }
4277
4278 return;
4279 error:
4280 ignore_rest_of_line ();
4281 }
4282
4283 static void
4284 s_arm_unwind_save_mmxwcg (void)
4285 {
4286 int reg;
4287 int hi_reg;
4288 unsigned mask = 0;
4289 valueT op;
4290
4291 if (*input_line_pointer == '{')
4292 input_line_pointer++;
4293
4294 skip_whitespace (input_line_pointer);
4295
4296 do
4297 {
4298 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4299
4300 if (reg == FAIL)
4301 {
4302 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4303 goto error;
4304 }
4305
4306 reg -= 8;
4307 if (mask >> reg)
4308 as_tsktsk (_("register list not in ascending order"));
4309 mask |= 1 << reg;
4310
4311 if (*input_line_pointer == '-')
4312 {
4313 input_line_pointer++;
4314 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4315 if (hi_reg == FAIL)
4316 {
4317 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4318 goto error;
4319 }
4320 else if (reg >= hi_reg)
4321 {
4322 as_bad (_("bad register range"));
4323 goto error;
4324 }
4325 for (; reg < hi_reg; reg++)
4326 mask |= 1 << reg;
4327 }
4328 }
4329 while (skip_past_comma (&input_line_pointer) != FAIL);
4330
4331 skip_past_char (&input_line_pointer, '}');
4332
4333 demand_empty_rest_of_line ();
4334
4335 /* Generate any deferred opcodes because we're going to be looking at
4336 the list. */
4337 flush_pending_unwind ();
4338
4339 for (reg = 0; reg < 16; reg++)
4340 {
4341 if (mask & (1 << reg))
4342 unwind.frame_size += 4;
4343 }
4344 op = 0xc700 | mask;
4345 add_unwind_opcode (op, 2);
4346 return;
4347 error:
4348 ignore_rest_of_line ();
4349 }
4350
4351
4352 /* Parse an unwind_save directive.
4353 If the argument is non-zero, this is a .vsave directive. */
4354
4355 static void
4356 s_arm_unwind_save (int arch_v6)
4357 {
4358 char *peek;
4359 struct reg_entry *reg;
4360 bfd_boolean had_brace = FALSE;
4361
4362 if (!unwind.proc_start)
4363 as_bad (MISSING_FNSTART);
4364
4365 /* Figure out what sort of save we have. */
4366 peek = input_line_pointer;
4367
4368 if (*peek == '{')
4369 {
4370 had_brace = TRUE;
4371 peek++;
4372 }
4373
4374 reg = arm_reg_parse_multi (&peek);
4375
4376 if (!reg)
4377 {
4378 as_bad (_("register expected"));
4379 ignore_rest_of_line ();
4380 return;
4381 }
4382
4383 switch (reg->type)
4384 {
4385 case REG_TYPE_FN:
4386 if (had_brace)
4387 {
4388 as_bad (_("FPA .unwind_save does not take a register list"));
4389 ignore_rest_of_line ();
4390 return;
4391 }
4392 input_line_pointer = peek;
4393 s_arm_unwind_save_fpa (reg->number);
4394 return;
4395
4396 case REG_TYPE_RN:
4397 s_arm_unwind_save_core ();
4398 return;
4399
4400 case REG_TYPE_VFD:
4401 if (arch_v6)
4402 s_arm_unwind_save_vfp_armv6 ();
4403 else
4404 s_arm_unwind_save_vfp ();
4405 return;
4406
4407 case REG_TYPE_MMXWR:
4408 s_arm_unwind_save_mmxwr ();
4409 return;
4410
4411 case REG_TYPE_MMXWCG:
4412 s_arm_unwind_save_mmxwcg ();
4413 return;
4414
4415 default:
4416 as_bad (_(".unwind_save does not support this kind of register"));
4417 ignore_rest_of_line ();
4418 }
4419 }
4420
4421
4422 /* Parse an unwind_movsp directive. */
4423
4424 static void
4425 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4426 {
4427 int reg;
4428 valueT op;
4429 int offset;
4430
4431 if (!unwind.proc_start)
4432 as_bad (MISSING_FNSTART);
4433
4434 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4435 if (reg == FAIL)
4436 {
4437 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4438 ignore_rest_of_line ();
4439 return;
4440 }
4441
4442 /* Optional constant. */
4443 if (skip_past_comma (&input_line_pointer) != FAIL)
4444 {
4445 if (immediate_for_directive (&offset) == FAIL)
4446 return;
4447 }
4448 else
4449 offset = 0;
4450
4451 demand_empty_rest_of_line ();
4452
4453 if (reg == REG_SP || reg == REG_PC)
4454 {
4455 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4456 return;
4457 }
4458
4459 if (unwind.fp_reg != REG_SP)
4460 as_bad (_("unexpected .unwind_movsp directive"));
4461
4462 /* Generate opcode to restore the value. */
4463 op = 0x90 | reg;
4464 add_unwind_opcode (op, 1);
4465
4466 /* Record the information for later. */
4467 unwind.fp_reg = reg;
4468 unwind.fp_offset = unwind.frame_size - offset;
4469 unwind.sp_restored = 1;
4470 }
4471
4472 /* Parse an unwind_pad directive. */
4473
4474 static void
4475 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4476 {
4477 int offset;
4478
4479 if (!unwind.proc_start)
4480 as_bad (MISSING_FNSTART);
4481
4482 if (immediate_for_directive (&offset) == FAIL)
4483 return;
4484
4485 if (offset & 3)
4486 {
4487 as_bad (_("stack increment must be multiple of 4"));
4488 ignore_rest_of_line ();
4489 return;
4490 }
4491
4492 /* Don't generate any opcodes, just record the details for later. */
4493 unwind.frame_size += offset;
4494 unwind.pending_offset += offset;
4495
4496 demand_empty_rest_of_line ();
4497 }
4498
4499 /* Parse an unwind_setfp directive. */
4500
4501 static void
4502 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4503 {
4504 int sp_reg;
4505 int fp_reg;
4506 int offset;
4507
4508 if (!unwind.proc_start)
4509 as_bad (MISSING_FNSTART);
4510
4511 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4512 if (skip_past_comma (&input_line_pointer) == FAIL)
4513 sp_reg = FAIL;
4514 else
4515 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4516
4517 if (fp_reg == FAIL || sp_reg == FAIL)
4518 {
4519 as_bad (_("expected <reg>, <reg>"));
4520 ignore_rest_of_line ();
4521 return;
4522 }
4523
4524 /* Optional constant. */
4525 if (skip_past_comma (&input_line_pointer) != FAIL)
4526 {
4527 if (immediate_for_directive (&offset) == FAIL)
4528 return;
4529 }
4530 else
4531 offset = 0;
4532
4533 demand_empty_rest_of_line ();
4534
4535 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4536 {
4537 as_bad (_("register must be either sp or set by a previous"
4538 "unwind_movsp directive"));
4539 return;
4540 }
4541
4542 /* Don't generate any opcodes, just record the information for later. */
4543 unwind.fp_reg = fp_reg;
4544 unwind.fp_used = 1;
4545 if (sp_reg == REG_SP)
4546 unwind.fp_offset = unwind.frame_size - offset;
4547 else
4548 unwind.fp_offset -= offset;
4549 }
4550
4551 /* Parse an unwind_raw directive. */
4552
4553 static void
4554 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4555 {
4556 expressionS exp;
4557 /* This is an arbitrary limit. */
4558 unsigned char op[16];
4559 int count;
4560
4561 if (!unwind.proc_start)
4562 as_bad (MISSING_FNSTART);
4563
4564 expression (&exp);
4565 if (exp.X_op == O_constant
4566 && skip_past_comma (&input_line_pointer) != FAIL)
4567 {
4568 unwind.frame_size += exp.X_add_number;
4569 expression (&exp);
4570 }
4571 else
4572 exp.X_op = O_illegal;
4573
4574 if (exp.X_op != O_constant)
4575 {
4576 as_bad (_("expected <offset>, <opcode>"));
4577 ignore_rest_of_line ();
4578 return;
4579 }
4580
4581 count = 0;
4582
4583 /* Parse the opcode. */
4584 for (;;)
4585 {
4586 if (count >= 16)
4587 {
4588 as_bad (_("unwind opcode too long"));
4589 ignore_rest_of_line ();
4590 }
4591 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4592 {
4593 as_bad (_("invalid unwind opcode"));
4594 ignore_rest_of_line ();
4595 return;
4596 }
4597 op[count++] = exp.X_add_number;
4598
4599 /* Parse the next byte. */
4600 if (skip_past_comma (&input_line_pointer) == FAIL)
4601 break;
4602
4603 expression (&exp);
4604 }
4605
4606 /* Add the opcode bytes in reverse order. */
4607 while (count--)
4608 add_unwind_opcode (op[count], 1);
4609
4610 demand_empty_rest_of_line ();
4611 }
4612
4613
4614 /* Parse a .eabi_attribute directive. */
4615
4616 static void
4617 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4618 {
4619 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4620
4621 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4622 attributes_set_explicitly[tag] = 1;
4623 }
4624
4625 /* Emit a tls fix for the symbol. */
4626
/* Emit a tls fix for the symbol: attach a (Thumb or ARM)
   TLS_DESCSEQ relocation at the current output position.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* No bytes are reserved here; the fixup simply marks the current
     position in the frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4648 #endif /* OBJ_ELF */
4649
/* Forward declarations for directive handlers defined later in this
   file; they are referenced by md_pseudo_table below.  */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);
static void s_arm_arch_extension (int);
4655
4656 #ifdef TE_PE
4657
/* Handle the PE ".secrel32" directive: emit one or more comma-separated
   32-bit values, converting each symbolic operand into a
   section-relative (O_secrel) expression.  */
static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* The loop above consumed one character past the last expression;
     step back so the end-of-line check sees it.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
4676 #endif /* TE_PE */
4677
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	 0 },
  { "qn",	   s_qn,	 0 },
  { "unreq",	   s_unreq,	 0 },
  { "bss",	   s_bss,	 0 },
  { "align",	   s_align_ptwo, 2 },
  { "arm",	   s_arm,	 0 },
  { "thumb",	   s_thumb,	 0 },
  { "code",	   s_code,	 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set",  s_thumb_set,	0 },
  { "even",	   s_even,	0 },
  { "ltorg",	   s_ltorg,	0 },
  { "pool",	   s_ltorg,	0 },
  { "syntax",	   s_syntax,	0 },
  { "cpu",	   s_arm_cpu,	0 },
  { "arch",	   s_arm_arch,	0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4757 \f
4758 /* Parser functions used exclusively in instruction operands. */
4759
4760 /* Generic immediate-value read function for use in insn parsing.
4761 STR points to the beginning of the immediate (the leading #);
4762 VAL receives the value; if the value is outside [MIN, MAX]
4763 issue an error. PREFIX_OPT is true if the immediate prefix is
4764 optional. */
4765
4766 static int
4767 parse_immediate (char **str, int *val, int min, int max,
4768 bfd_boolean prefix_opt)
4769 {
4770 expressionS exp;
4771 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4772 if (exp.X_op != O_constant)
4773 {
4774 inst.error = _("constant expression required");
4775 return FAIL;
4776 }
4777
4778 if (exp.X_add_number < min || exp.X_add_number > max)
4779 {
4780 inst.error = _("immediate value out of range");
4781 return FAIL;
4782 }
4783
4784 *val = exp.X_add_number;
4785 return SUCCESS;
4786 }
4787
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32 bits
   in .imm and, for a 64-bit value, the high 32 bits in .reg with .regisimm
   set.  If IN_EXP is non-NULL the parsed expression is also stored there.
   ALLOW_SYMBOL_P permits a bare symbol to parse successfully.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm, the next 32 into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4860
/* Returns the pseudo-register number of an FPA immediate constant
   (8 + index into fp_values), or FAIL if there isn't a valid constant
   here.  Only the fixed set of FPA constants is representable.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not at end of line; back out of the partial match.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* The generic expression code reads from input_line_pointer, so
     temporarily redirect it at our string and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4953
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the IEEE single-precision
   patterns representable in the 8-bit VFP/Neon modified-immediate
   encoding.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_exp;

  /* The low 19 bits of the fraction must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must be either 0b011111 or 0b100000: bit 30 is the
     complement of bit 29, and bits 29..25 all equal bit 29.  */
  expected_exp = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_exp;
}
4963
4964
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE only for a positive zero; the input
   pointer is advanced past the constant on success.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to identify an empty (zero)
     significand in gas's flonum representation — confirm against
     atof_generic's documentation.  Only a positive zero matches.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5002
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On SUCCESS the 32-bit single-precision
   bit pattern is stored in *IMMED and *CCP is advanced.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan ahead for a character that marks the token as floating
	 point; reject plain integers.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns, plus +0.0/-0.0 which are
	 handled specially by the callers.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5066
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its shift_kind; entries are looked up
   through the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5088
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   Writes .shift_kind/.shifted into inst.operands[I]; a register shift
   amount goes into .imm with .immisreg set, an immediate amount into
   inst.reloc.exp.  Returns SUCCESS or FAIL.  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Gather the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the caller's restriction on which shifts are acceptable.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; everything else needs a register or immediate.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5179
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified: 8-bit constant with the rotation amount in
	 the position used by the A32 encoding (halved, at bit 7).  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate; leave range checking to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5249
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

/* Selects which of the non-ALU relocation codes from a
   group_reloc_table_entry applies to the instruction being parsed.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5274
/* A zero code below means the group relocation is not permitted for
   that instruction class; parse_address_main reports an error when a
   zero entry is selected.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5349
5350 /* Given the address of a pointer pointing to the textual name of a group
5351 relocation as may appear in assembler source, attempt to find its details
5352 in group_reloc_table. The pointer will be updated to the character after
5353 the trailing colon. On failure, FAIL will be returned; SUCCESS
5354 otherwise. On success, *entry will be updated to point at the relevant
5355 group_reloc_table entry. */
5356
5357 static int
5358 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5359 {
5360 unsigned int i;
5361 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5362 {
5363 int length = strlen (group_reloc_table[i].name);
5364
5365 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5366 && (*str)[length] == ':')
5367 {
5368 *out = &group_reloc_table[i];
5369 *str += (length + 1);
5370 return SUCCESS;
5371 }
5372 }
5373
5374 return FAIL;
5375 }
5376
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the '#:' or ':' introducer.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5432
5433 /* Parse a Neon alignment expression. Information is written to
5434 inst.operands[i]. We assume the initial ':' has been skipped.
5435
5436 align .imm = align << 8, .immisalign=1, .preind=0 */
5437 static parse_operand_result
5438 parse_neon_alignment (char **str, int i)
5439 {
5440 char *p = *str;
5441 expressionS exp;
5442
5443 my_get_expression (&exp, &p, GE_NO_PREFIX);
5444
5445 if (exp.X_op != O_constant)
5446 {
5447 inst.error = _("alignment must be constant");
5448 return PARSE_OPERAND_FAIL;
5449 }
5450
5451 inst.operands[i].imm = exp.X_add_number << 8;
5452 inst.operands[i].immisalign = 1;
5453 /* Alignments are not pre-indexes. */
5454 inst.operands[i].preind = 0;
5455
5456 *str = p;
5457 return PARSE_OPERAND_SUCCESS;
5458 }
5459
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      /* =immediate form (literal-pool load).  */
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ...  -- pre-indexed forms.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Not a register after all; back up so the '-' is part of
		 the immediate expression.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this relocation is not valid
		 for this class of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], offset -- post-indexed forms.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  /* Not a register after all; back up so the '-' is part
		     of the immediate expression.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5743
5744 static int
5745 parse_address (char **str, int i)
5746 {
5747 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5748 ? SUCCESS : FAIL;
5749 }
5750
5751 static parse_operand_result
5752 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5753 {
5754 return parse_address_main (str, i, 1, type);
5755 }
5756
/* Parse an operand for a MOVW or MOVT instruction: either a plain
   16-bit constant, or a ":lower16:"/":upper16:" prefixed expression
   which becomes a MOVW/MOVT relocation.  Assumes inst.reloc.type is
   BFD_RELOC_UNUSED on entry.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* A prefix matched above; step past it (both prefixes are 9 chars).  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a :lower16:/:upper16: prefix the operand must be a
     constant that fits in 16 bits.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5796
5797 /* Miscellaneous. */
5798
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of a write (MSR);
   this enables the APSR "bare write" deprecation path and the default
   M-profile mask.  On success *STR is advanced past the operand.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole (alphanumeric/underscore) name up in
	 the v7m special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *psr families, only the part up to and including the
	 'r' is the register name; anything after it is a suffix.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip the four-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; 0x20 (or 0x2 for g) records a
	     duplicated letter, which is rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n,z,c,v,q together map to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial nzcvq masks: only "nzcvq",
	     "g", "nzcvqg" or empty are representable.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the suffix up in the A/R-profile flag table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5995
5996 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5997 value suitable for splatting into the AIF field of the instruction. */
5998
5999 static int
6000 parse_cps_flags (char **str)
6001 {
6002 int val = 0;
6003 int saw_a_flag = 0;
6004 char *s = *str;
6005
6006 for (;;)
6007 switch (*s++)
6008 {
6009 case '\0': case ',':
6010 goto done;
6011
6012 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6013 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6014 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6015
6016 default:
6017 inst.error = _("unrecognized CPS flag");
6018 return FAIL;
6019 }
6020
6021 done:
6022 if (saw_a_flag == 0)
6023 {
6024 inst.error = _("missing CPS flags");
6025 return FAIL;
6026 }
6027
6028 *str = s - 1;
6029 return val;
6030 }
6031
6032 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6033 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6034
6035 static int
6036 parse_endian_specifier (char **str)
6037 {
6038 int little_endian;
6039 char *s = *str;
6040
6041 if (strncasecmp (s, "BE", 2))
6042 little_endian = 0;
6043 else if (strncasecmp (s, "LE", 2))
6044 little_endian = 1;
6045 else
6046 {
6047 inst.error = _("valid endian specifiers are be or le");
6048 return FAIL;
6049 }
6050
6051 if (ISALNUM (s[2]) || s[2] == '_')
6052 {
6053 inst.error = _("valid endian specifiers are be or le");
6054 return FAIL;
6055 }
6056
6057 *str = s + 2;
6058 return little_endian;
6059 }
6060
6061 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6062 value suitable for poking into the rotate field of an sxt or sxta
6063 instruction, or FAIL on error. */
6064
6065 static int
6066 parse_ror (char **str)
6067 {
6068 int rot;
6069 char *s = *str;
6070
6071 if (strncasecmp (s, "ROR", 3) == 0)
6072 s += 3;
6073 else
6074 {
6075 inst.error = _("missing rotation field after comma");
6076 return FAIL;
6077 }
6078
6079 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6080 return FAIL;
6081
6082 switch (rot)
6083 {
6084 case 0: *str = s; return 0x0;
6085 case 8: *str = s; return 0x1;
6086 case 16: *str = s; return 0x2;
6087 case 24: *str = s; return 0x3;
6088
6089 default:
6090 inst.error = _("rotation can only be 0, 8, 16, or 24");
6091 return FAIL;
6092 }
6093 }
6094
6095 /* Parse a conditional code (from conds[] below). The value returned is in the
6096 range 0 .. 14, or FAIL. */
6097 static int
6098 parse_cond (char **str)
6099 {
6100 char *q;
6101 const struct asm_cond *c;
6102 int n;
6103 /* Condition codes are always 2 characters, so matching up to
6104 3 characters is sufficient. */
6105 char cond[3];
6106
6107 q = *str;
6108 n = 0;
6109 while (ISALPHA (*q) && n < 3)
6110 {
6111 cond[n] = TOLOWER (*q);
6112 q++;
6113 n++;
6114 }
6115
6116 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6117 if (!c)
6118 {
6119 inst.error = _("condition required");
6120 return FAIL;
6121 }
6122
6123 *str = q;
6124 return c->value;
6125 }
6126
6127 /* Record a use of the given feature. */
6128 static void
6129 record_feature_use (const arm_feature_set *feature)
6130 {
6131 if (thumb_mode)
6132 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6133 else
6134 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6135 }
6136
6137 /* If the given feature available in the selected CPU, mark it as used.
6138 Returns TRUE iff feature is available. */
6139 static bfd_boolean
6140 mark_feature_used (const arm_feature_set *feature)
6141 {
6142 /* Ensure the option is valid on the current architecture. */
6143 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6144 return FALSE;
6145
6146 /* Add the appropriate architecture feature for the barrier option used.
6147 */
6148 record_feature_use (feature);
6149
6150 return TRUE;
6151 }
6152
6153 /* Parse an option for a barrier instruction. Returns the encoding for the
6154 option, or FAIL. */
6155 static int
6156 parse_barrier (char **str)
6157 {
6158 char *p, *q;
6159 const struct asm_barrier_opt *o;
6160
6161 p = q = *str;
6162 while (ISALPHA (*q))
6163 q++;
6164
6165 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6166 q - p);
6167 if (!o)
6168 return FAIL;
6169
6170 if (!mark_feature_used (&o->arch))
6171 return FAIL;
6172
6173 *str = q;
6174 return o->value;
6175 }
6176
6177 /* Parse the operands of a table branch instruction. Similar to a memory
6178 operand. */
6179 static int
6180 parse_tb (char **str)
6181 {
6182 char * p = *str;
6183 int reg;
6184
6185 if (skip_past_char (&p, '[') == FAIL)
6186 {
6187 inst.error = _("'[' expected");
6188 return FAIL;
6189 }
6190
6191 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6192 {
6193 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6194 return FAIL;
6195 }
6196 inst.operands[0].reg = reg;
6197
6198 if (skip_past_comma (&p) == FAIL)
6199 {
6200 inst.error = _("',' expected");
6201 return FAIL;
6202 }
6203
6204 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6205 {
6206 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6207 return FAIL;
6208 }
6209 inst.operands[0].imm = reg;
6210
6211 if (skip_past_comma (&p) == SUCCESS)
6212 {
6213 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6214 return FAIL;
6215 if (inst.reloc.exp.X_add_number != 1)
6216 {
6217 inst.error = _("invalid shift");
6218 return FAIL;
6219 }
6220 inst.operands[0].shifted = 1;
6221 }
6222
6223 if (skip_past_char (&p, ']') == FAIL)
6224 {
6225 inst.error = _("']' expected");
6226 return FAIL;
6227 }
6228 *str = p;
6229 return SUCCESS;
6230 }
6231
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.
   Note: the local operand counter I is advanced with post-increment as
   each operand is recorded; statement order is significant throughout.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The leading operand discriminates between the VMOV forms: scalar,
     vector/VFP register, or ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register with two core registers: expect the second
		 core register (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6454
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low 16 bits
   and the Thumb code in the high 16 bits; parse_operands selects the
   half matching the current instruction set.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  Codes prefixed OP_o are optional;
   everything from OP_FIRST_OPTIONAL onwards may be backtracked over.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6592
6593 /* Generic instruction operand parser. This does no encoding and no
6594 semantic validation; it merely squirrels values away in the inst
6595 structure. Returns SUCCESS or FAIL depending on whether the
6596 specified grammar matched. */
6597 static int
6598 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6599 {
6600 unsigned const int *upat = pattern;
6601 char *backtrack_pos = 0;
6602 const char *backtrack_error = 0;
6603 int i, val = 0, backtrack_index = 0;
6604 enum arm_reg_type rtype;
6605 parse_operand_result result;
6606 unsigned int op_parse_code;
6607
6608 #define po_char_or_fail(chr) \
6609 do \
6610 { \
6611 if (skip_past_char (&str, chr) == FAIL) \
6612 goto bad_args; \
6613 } \
6614 while (0)
6615
6616 #define po_reg_or_fail(regtype) \
6617 do \
6618 { \
6619 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6620 & inst.operands[i].vectype); \
6621 if (val == FAIL) \
6622 { \
6623 first_error (_(reg_expected_msgs[regtype])); \
6624 goto failure; \
6625 } \
6626 inst.operands[i].reg = val; \
6627 inst.operands[i].isreg = 1; \
6628 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6629 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6630 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6631 || rtype == REG_TYPE_VFD \
6632 || rtype == REG_TYPE_NQ); \
6633 } \
6634 while (0)
6635
6636 #define po_reg_or_goto(regtype, label) \
6637 do \
6638 { \
6639 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6640 & inst.operands[i].vectype); \
6641 if (val == FAIL) \
6642 goto label; \
6643 \
6644 inst.operands[i].reg = val; \
6645 inst.operands[i].isreg = 1; \
6646 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6647 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6648 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6649 || rtype == REG_TYPE_VFD \
6650 || rtype == REG_TYPE_NQ); \
6651 } \
6652 while (0)
6653
6654 #define po_imm_or_fail(min, max, popt) \
6655 do \
6656 { \
6657 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6658 goto failure; \
6659 inst.operands[i].imm = val; \
6660 } \
6661 while (0)
6662
6663 #define po_scalar_or_goto(elsz, label) \
6664 do \
6665 { \
6666 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6667 if (val == FAIL) \
6668 goto label; \
6669 inst.operands[i].reg = val; \
6670 inst.operands[i].isscalar = 1; \
6671 } \
6672 while (0)
6673
6674 #define po_misc_or_fail(expr) \
6675 do \
6676 { \
6677 if (expr) \
6678 goto failure; \
6679 } \
6680 while (0)
6681
6682 #define po_misc_or_fail_no_backtrack(expr) \
6683 do \
6684 { \
6685 result = expr; \
6686 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6687 backtrack_pos = 0; \
6688 if (result != PARSE_OPERAND_SUCCESS) \
6689 goto failure; \
6690 } \
6691 while (0)
6692
6693 #define po_barrier_or_imm(str) \
6694 do \
6695 { \
6696 val = parse_barrier (&str); \
6697 if (val == FAIL && ! ISALPHA (*str)) \
6698 goto immediate; \
6699 if (val == FAIL \
6700 /* ISB can only take SY as an option. */ \
6701 || ((inst.instruction & 0xf0) == 0x60 \
6702 && val != 0xf)) \
6703 { \
6704 inst.error = _("invalid barrier type"); \
6705 backtrack_pos = 0; \
6706 goto failure; \
6707 } \
6708 } \
6709 while (0)
6710
6711 skip_whitespace (str);
6712
6713 for (i = 0; upat[i] != OP_stop; i++)
6714 {
6715 op_parse_code = upat[i];
6716 if (op_parse_code >= 1<<16)
6717 op_parse_code = thumb ? (op_parse_code >> 16)
6718 : (op_parse_code & ((1<<16)-1));
6719
6720 if (op_parse_code >= OP_FIRST_OPTIONAL)
6721 {
6722 /* Remember where we are in case we need to backtrack. */
6723 gas_assert (!backtrack_pos);
6724 backtrack_pos = str;
6725 backtrack_error = inst.error;
6726 backtrack_index = i;
6727 }
6728
6729 if (i > 0 && (i > 1 || inst.operands[0].present))
6730 po_char_or_fail (',');
6731
6732 switch (op_parse_code)
6733 {
6734 /* Registers */
6735 case OP_oRRnpc:
6736 case OP_oRRnpcsp:
6737 case OP_RRnpc:
6738 case OP_RRnpcsp:
6739 case OP_oRR:
6740 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6741 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6742 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6743 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6744 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6745 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6746 case OP_oRND:
6747 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6748 case OP_RVC:
6749 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6750 break;
6751 /* Also accept generic coprocessor regs for unknown registers. */
6752 coproc_reg:
6753 po_reg_or_fail (REG_TYPE_CN);
6754 break;
6755 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6756 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6757 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6758 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6759 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6760 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6761 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6762 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6763 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6764 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6765 case OP_oRNQ:
6766 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6767 case OP_oRNDQ:
6768 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6769 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6770 case OP_oRNSDQ:
6771 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6772
6773 /* Neon scalar. Using an element size of 8 means that some invalid
6774 scalars are accepted here, so deal with those in later code. */
6775 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6776
6777 case OP_RNDQ_I0:
6778 {
6779 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6780 break;
6781 try_imm0:
6782 po_imm_or_fail (0, 0, TRUE);
6783 }
6784 break;
6785
6786 case OP_RVSD_I0:
6787 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6788 break;
6789
6790 case OP_RSVD_FI0:
6791 {
6792 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6793 break;
6794 try_ifimm0:
6795 if (parse_ifimm_zero (&str))
6796 inst.operands[i].imm = 0;
6797 else
6798 {
6799 inst.error
6800 = _("only floating point zero is allowed as immediate value");
6801 goto failure;
6802 }
6803 }
6804 break;
6805
6806 case OP_RR_RNSC:
6807 {
6808 po_scalar_or_goto (8, try_rr);
6809 break;
6810 try_rr:
6811 po_reg_or_fail (REG_TYPE_RN);
6812 }
6813 break;
6814
6815 case OP_RNSDQ_RNSC:
6816 {
6817 po_scalar_or_goto (8, try_nsdq);
6818 break;
6819 try_nsdq:
6820 po_reg_or_fail (REG_TYPE_NSDQ);
6821 }
6822 break;
6823
6824 case OP_RNDQ_RNSC:
6825 {
6826 po_scalar_or_goto (8, try_ndq);
6827 break;
6828 try_ndq:
6829 po_reg_or_fail (REG_TYPE_NDQ);
6830 }
6831 break;
6832
6833 case OP_RND_RNSC:
6834 {
6835 po_scalar_or_goto (8, try_vfd);
6836 break;
6837 try_vfd:
6838 po_reg_or_fail (REG_TYPE_VFD);
6839 }
6840 break;
6841
6842 case OP_VMOV:
6843 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6844 not careful then bad things might happen. */
6845 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6846 break;
6847
6848 case OP_RNDQ_Ibig:
6849 {
6850 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6851 break;
6852 try_immbig:
6853 /* There's a possibility of getting a 64-bit immediate here, so
6854 we need special handling. */
6855 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6856 == FAIL)
6857 {
6858 inst.error = _("immediate value is out of range");
6859 goto failure;
6860 }
6861 }
6862 break;
6863
6864 case OP_RNDQ_I63b:
6865 {
6866 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6867 break;
6868 try_shimm:
6869 po_imm_or_fail (0, 63, TRUE);
6870 }
6871 break;
6872
6873 case OP_RRnpcb:
6874 po_char_or_fail ('[');
6875 po_reg_or_fail (REG_TYPE_RN);
6876 po_char_or_fail (']');
6877 break;
6878
6879 case OP_RRnpctw:
6880 case OP_RRw:
6881 case OP_oRRw:
6882 po_reg_or_fail (REG_TYPE_RN);
6883 if (skip_past_char (&str, '!') == SUCCESS)
6884 inst.operands[i].writeback = 1;
6885 break;
6886
6887 /* Immediates */
6888 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6889 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6890 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6891 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6892 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6893 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6894 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6895 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6896 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6897 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6898 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6899 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6900
6901 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6902 case OP_oI7b:
6903 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6904 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6905 case OP_oI31b:
6906 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6907 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6908 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6909 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6910
6911 /* Immediate variants */
6912 case OP_oI255c:
6913 po_char_or_fail ('{');
6914 po_imm_or_fail (0, 255, TRUE);
6915 po_char_or_fail ('}');
6916 break;
6917
6918 case OP_I31w:
6919 /* The expression parser chokes on a trailing !, so we have
6920 to find it first and zap it. */
6921 {
6922 char *s = str;
6923 while (*s && *s != ',')
6924 s++;
6925 if (s[-1] == '!')
6926 {
6927 s[-1] = '\0';
6928 inst.operands[i].writeback = 1;
6929 }
6930 po_imm_or_fail (0, 31, TRUE);
6931 if (str == s - 1)
6932 str = s;
6933 }
6934 break;
6935
6936 /* Expressions */
6937 case OP_EXPi: EXPi:
6938 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6939 GE_OPT_PREFIX));
6940 break;
6941
6942 case OP_EXP:
6943 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6944 GE_NO_PREFIX));
6945 break;
6946
6947 case OP_EXPr: EXPr:
6948 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6949 GE_NO_PREFIX));
6950 if (inst.reloc.exp.X_op == O_symbol)
6951 {
6952 val = parse_reloc (&str);
6953 if (val == -1)
6954 {
6955 inst.error = _("unrecognized relocation suffix");
6956 goto failure;
6957 }
6958 else if (val != BFD_RELOC_UNUSED)
6959 {
6960 inst.operands[i].imm = val;
6961 inst.operands[i].hasreloc = 1;
6962 }
6963 }
6964 break;
6965
6966 /* Operand for MOVW or MOVT. */
6967 case OP_HALF:
6968 po_misc_or_fail (parse_half (&str));
6969 break;
6970
6971 /* Register or expression. */
6972 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6973 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6974
6975 /* Register or immediate. */
6976 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6977 I0: po_imm_or_fail (0, 0, FALSE); break;
6978
6979 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6980 IF:
6981 if (!is_immediate_prefix (*str))
6982 goto bad_args;
6983 str++;
6984 val = parse_fpa_immediate (&str);
6985 if (val == FAIL)
6986 goto failure;
6987 /* FPA immediates are encoded as registers 8-15.
6988 parse_fpa_immediate has already applied the offset. */
6989 inst.operands[i].reg = val;
6990 inst.operands[i].isreg = 1;
6991 break;
6992
6993 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6994 I32z: po_imm_or_fail (0, 32, FALSE); break;
6995
6996 /* Two kinds of register. */
6997 case OP_RIWR_RIWC:
6998 {
6999 struct reg_entry *rege = arm_reg_parse_multi (&str);
7000 if (!rege
7001 || (rege->type != REG_TYPE_MMXWR
7002 && rege->type != REG_TYPE_MMXWC
7003 && rege->type != REG_TYPE_MMXWCG))
7004 {
7005 inst.error = _("iWMMXt data or control register expected");
7006 goto failure;
7007 }
7008 inst.operands[i].reg = rege->number;
7009 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7010 }
7011 break;
7012
7013 case OP_RIWC_RIWG:
7014 {
7015 struct reg_entry *rege = arm_reg_parse_multi (&str);
7016 if (!rege
7017 || (rege->type != REG_TYPE_MMXWC
7018 && rege->type != REG_TYPE_MMXWCG))
7019 {
7020 inst.error = _("iWMMXt control register expected");
7021 goto failure;
7022 }
7023 inst.operands[i].reg = rege->number;
7024 inst.operands[i].isreg = 1;
7025 }
7026 break;
7027
7028 /* Misc */
7029 case OP_CPSF: val = parse_cps_flags (&str); break;
7030 case OP_ENDI: val = parse_endian_specifier (&str); break;
7031 case OP_oROR: val = parse_ror (&str); break;
7032 case OP_COND: val = parse_cond (&str); break;
7033 case OP_oBARRIER_I15:
7034 po_barrier_or_imm (str); break;
7035 immediate:
7036 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7037 goto failure;
7038 break;
7039
7040 case OP_wPSR:
7041 case OP_rPSR:
7042 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7043 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7044 {
7045 inst.error = _("Banked registers are not available with this "
7046 "architecture.");
7047 goto failure;
7048 }
7049 break;
7050 try_psr:
7051 val = parse_psr (&str, op_parse_code == OP_wPSR);
7052 break;
7053
7054 case OP_APSR_RR:
7055 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7056 break;
7057 try_apsr:
7058 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7059 instruction). */
7060 if (strncasecmp (str, "APSR_", 5) == 0)
7061 {
7062 unsigned found = 0;
7063 str += 5;
7064 while (found < 15)
7065 switch (*str++)
7066 {
7067 case 'c': found = (found & 1) ? 16 : found | 1; break;
7068 case 'n': found = (found & 2) ? 16 : found | 2; break;
7069 case 'z': found = (found & 4) ? 16 : found | 4; break;
7070 case 'v': found = (found & 8) ? 16 : found | 8; break;
7071 default: found = 16;
7072 }
7073 if (found != 15)
7074 goto failure;
7075 inst.operands[i].isvec = 1;
7076 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7077 inst.operands[i].reg = REG_PC;
7078 }
7079 else
7080 goto failure;
7081 break;
7082
7083 case OP_TB:
7084 po_misc_or_fail (parse_tb (&str));
7085 break;
7086
7087 /* Register lists. */
7088 case OP_REGLST:
7089 val = parse_reg_list (&str);
7090 if (*str == '^')
7091 {
7092 inst.operands[i].writeback = 1;
7093 str++;
7094 }
7095 break;
7096
7097 case OP_VRSLST:
7098 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7099 break;
7100
7101 case OP_VRDLST:
7102 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7103 break;
7104
7105 case OP_VRSDLST:
7106 /* Allow Q registers too. */
7107 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7108 REGLIST_NEON_D);
7109 if (val == FAIL)
7110 {
7111 inst.error = NULL;
7112 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7113 REGLIST_VFP_S);
7114 inst.operands[i].issingle = 1;
7115 }
7116 break;
7117
7118 case OP_NRDLST:
7119 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7120 REGLIST_NEON_D);
7121 break;
7122
7123 case OP_NSTRLST:
7124 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7125 &inst.operands[i].vectype);
7126 break;
7127
7128 /* Addressing modes */
7129 case OP_ADDR:
7130 po_misc_or_fail (parse_address (&str, i));
7131 break;
7132
7133 case OP_ADDRGLDR:
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str, i, GROUP_LDR));
7136 break;
7137
7138 case OP_ADDRGLDRS:
7139 po_misc_or_fail_no_backtrack (
7140 parse_address_group_reloc (&str, i, GROUP_LDRS));
7141 break;
7142
7143 case OP_ADDRGLDC:
7144 po_misc_or_fail_no_backtrack (
7145 parse_address_group_reloc (&str, i, GROUP_LDC));
7146 break;
7147
7148 case OP_SH:
7149 po_misc_or_fail (parse_shifter_operand (&str, i));
7150 break;
7151
7152 case OP_SHG:
7153 po_misc_or_fail_no_backtrack (
7154 parse_shifter_operand_group_reloc (&str, i));
7155 break;
7156
7157 case OP_oSHll:
7158 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7159 break;
7160
7161 case OP_oSHar:
7162 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7163 break;
7164
7165 case OP_oSHllar:
7166 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7167 break;
7168
7169 default:
7170 as_fatal (_("unhandled operand code %d"), op_parse_code);
7171 }
7172
7173 /* Various value-based sanity checks and shared operations. We
7174 do not signal immediate failures for the register constraints;
7175 this allows a syntax error to take precedence. */
7176 switch (op_parse_code)
7177 {
7178 case OP_oRRnpc:
7179 case OP_RRnpc:
7180 case OP_RRnpcb:
7181 case OP_RRw:
7182 case OP_oRRw:
7183 case OP_RRnpc_I0:
7184 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7185 inst.error = BAD_PC;
7186 break;
7187
7188 case OP_oRRnpcsp:
7189 case OP_RRnpcsp:
7190 if (inst.operands[i].isreg)
7191 {
7192 if (inst.operands[i].reg == REG_PC)
7193 inst.error = BAD_PC;
7194 else if (inst.operands[i].reg == REG_SP
7195 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7196 relaxed since ARMv8-A. */
7197 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7198 {
7199 gas_assert (thumb);
7200 inst.error = BAD_SP;
7201 }
7202 }
7203 break;
7204
7205 case OP_RRnpctw:
7206 if (inst.operands[i].isreg
7207 && inst.operands[i].reg == REG_PC
7208 && (inst.operands[i].writeback || thumb))
7209 inst.error = BAD_PC;
7210 break;
7211
7212 case OP_CPSF:
7213 case OP_ENDI:
7214 case OP_oROR:
7215 case OP_wPSR:
7216 case OP_rPSR:
7217 case OP_COND:
7218 case OP_oBARRIER_I15:
7219 case OP_REGLST:
7220 case OP_VRSLST:
7221 case OP_VRDLST:
7222 case OP_VRSDLST:
7223 case OP_NRDLST:
7224 case OP_NSTRLST:
7225 if (val == FAIL)
7226 goto failure;
7227 inst.operands[i].imm = val;
7228 break;
7229
7230 default:
7231 break;
7232 }
7233
7234 /* If we get here, this operand was successfully parsed. */
7235 inst.operands[i].present = 1;
7236 continue;
7237
7238 bad_args:
7239 inst.error = BAD_ARGS;
7240
7241 failure:
7242 if (!backtrack_pos)
7243 {
7244 /* The parse routine should already have set inst.error, but set a
7245 default here just in case. */
7246 if (!inst.error)
7247 inst.error = _("syntax error");
7248 return FAIL;
7249 }
7250
7251 /* Do not backtrack over a trailing optional argument that
7252 absorbed some text. We will only fail again, with the
7253 'garbage following instruction' error message, which is
7254 probably less helpful than the current one. */
7255 if (backtrack_index == i && backtrack_pos != str
7256 && upat[i+1] == OP_stop)
7257 {
7258 if (!inst.error)
7259 inst.error = _("syntax error");
7260 return FAIL;
7261 }
7262
7263 /* Try again, skipping the optional argument at backtrack_pos. */
7264 str = backtrack_pos;
7265 inst.error = backtrack_error;
7266 inst.operands[backtrack_index].present = 0;
7267 i = backtrack_index;
7268 backtrack_pos = 0;
7269 }
7270
7271 /* Check that we have parsed all the arguments. */
7272 if (*str != '\0' && !inst.error)
7273 inst.error = _("garbage following instruction");
7274
7275 return inst.error ? FAIL : SUCCESS;
7276 }
7277
7278 #undef po_char_or_fail
7279 #undef po_reg_or_fail
7280 #undef po_reg_or_goto
7281 #undef po_imm_or_fail
7282 #undef po_scalar_or_fail
7283 #undef po_barrier_or_imm
7284
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   enclosing function.  Only usable inside a void function, since it
   expands to a bare "return;".  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7296
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Sets inst.error and returns from the enclosing void function when
   REG is unacceptable.  NOTE(review): REG is expanded without
   parentheses; pass a simple expression.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
7317
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The diagnostic is only issued when deprecation
   warnings are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7325
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bit positions.  Both shift
   counts are masked with "& 31" so the expansion never shifts by 32
   or more (which would be undefined behaviour), including for N == 0.
   V and N are evaluated more than once; avoid side effects in the
   arguments.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7329
7330 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7331
7332 The only binary encoding difference is the Coprocessor number. Coprocessor
7333 9 is used for half-precision calculations or conversions. The format of the
7334 instruction is the same as the equivalent Coprocessor 10 instruction that
7335 exists for Single-Precision operation. */
7336
7337 static void
7338 do_scalar_fp16_v82_encode (void)
7339 {
7340 if (inst.cond != COND_ALWAYS)
7341 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7342 " the behaviour is UNPREDICTABLE"));
7343 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7344 _(BAD_FP16));
7345
7346 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7347 mark_feature_used (&arm_ext_fp16);
7348 }
7349
7350 /* If VAL can be encoded in the immediate field of an ARM instruction,
7351 return the encoded form. Otherwise, return FAIL. */
7352
7353 static unsigned int
7354 encode_arm_immediate (unsigned int val)
7355 {
7356 unsigned int a, i;
7357
7358 if (val <= 0xff)
7359 return val;
7360
7361 for (i = 2; i < 32; i += 2)
7362 if ((a = rotate_left (val, i)) <= 0xff)
7363 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7364
7365 return FAIL;
7366 }
7367
7368 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7369 return the encoded form. Otherwise, return FAIL. */
7370 static unsigned int
7371 encode_thumb32_immediate (unsigned int val)
7372 {
7373 unsigned int a, i;
7374
7375 if (val <= 0xff)
7376 return val;
7377
7378 for (i = 1; i <= 24; i++)
7379 {
7380 a = val >> i;
7381 if ((val & ~(0xff << i)) == 0)
7382 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7383 }
7384
7385 a = val & 0xff;
7386 if (val == ((a << 16) | a))
7387 return 0x100 | a;
7388 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7389 return 0x300 | a;
7390
7391 a = val & 0xff00;
7392 if (val == ((a << 16) | a))
7393 return 0x200 | (a >> 8);
7394
7395 return FAIL;
7396 }
7397 /* Encode a VFP SP or DP register number into inst.instruction. */
7398
7399 static void
7400 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7401 {
7402 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7403 && reg > 15)
7404 {
7405 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7406 {
7407 if (thumb_mode)
7408 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7409 fpu_vfp_ext_d32);
7410 else
7411 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7412 fpu_vfp_ext_d32);
7413 }
7414 else
7415 {
7416 first_error (_("D register out of range for selected VFP version"));
7417 return;
7418 }
7419 }
7420
7421 switch (pos)
7422 {
7423 case VFP_REG_Sd:
7424 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7425 break;
7426
7427 case VFP_REG_Sn:
7428 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7429 break;
7430
7431 case VFP_REG_Sm:
7432 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7433 break;
7434
7435 case VFP_REG_Dd:
7436 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7437 break;
7438
7439 case VFP_REG_Dn:
7440 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7441 break;
7442
7443 case VFP_REG_Dm:
7444 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7445 break;
7446
7447 default:
7448 abort ();
7449 }
7450 }
7451
7452 /* Encode a <shift> in an ARM-format instruction. The immediate,
7453 if any, is handled by md_apply_fix. */
7454 static void
7455 encode_arm_shift (int i)
7456 {
7457 /* register-shifted register. */
7458 if (inst.operands[i].immisreg)
7459 {
7460 int op_index;
7461 for (op_index = 0; op_index <= i; ++op_index)
7462 {
7463 /* Check the operand only when it's presented. In pre-UAL syntax,
7464 if the destination register is the same as the first operand, two
7465 register form of the instruction can be used. */
7466 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7467 && inst.operands[op_index].reg == REG_PC)
7468 as_warn (UNPRED_REG ("r15"));
7469 }
7470
7471 if (inst.operands[i].imm == REG_PC)
7472 as_warn (UNPRED_REG ("r15"));
7473 }
7474
7475 if (inst.operands[i].shift_kind == SHIFT_RRX)
7476 inst.instruction |= SHIFT_ROR << 5;
7477 else
7478 {
7479 inst.instruction |= inst.operands[i].shift_kind << 5;
7480 if (inst.operands[i].immisreg)
7481 {
7482 inst.instruction |= SHIFT_BY_REG;
7483 inst.instruction |= inst.operands[i].imm << 8;
7484 }
7485 else
7486 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7487 }
7488 }
7489
7490 static void
7491 encode_arm_shifter_operand (int i)
7492 {
7493 if (inst.operands[i].isreg)
7494 {
7495 inst.instruction |= inst.operands[i].reg;
7496 encode_arm_shift (i);
7497 }
7498 else
7499 {
7500 inst.instruction |= INST_IMMEDIATE;
7501 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7502 inst.instruction |= inst.operands[i].imm;
7503 }
7504 }
7505
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and indexing mode of operand I into
   inst.instruction.  IS_T is true for the unprivileged ("T"-suffixed)
   load/store variants, which only accept post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexing always writes the base back; for T variants the
	 W bit instead selects the unprivileged form.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base (bits [19:16]) equals Rd/Rt (bits [15:12]) and
     the base will be written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7548
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally shifted.  PC is not permitted as
	 offset, nor as a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX encodes as ROR with an implicit zero amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7608
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter field, so a scaled register index is
     invalid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset: PC is forbidden as offset, as T-form base,
	 and as a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7652
7653 /* Write immediate bits [7:0] to the following locations:
7654
7655 |28/24|23 19|18 16|15 4|3 0|
7656 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7657
7658 This function is used by VMOV/VMVN/VORR/VBIC. */
7659
7660 static void
7661 neon_write_immbits (unsigned immbits)
7662 {
7663 inst.instruction |= immbits & 0xf;
7664 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7665 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7666 }
7667
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      /* Only a 64-bit size touches the high word.  */
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (lo != 0 || xlo)
    {
      if (xlo)
	*xlo = lo;
    }

  if (xhi)
    *xhi = hi;
}
7704
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned byte;

  /* Each of the four bytes must be all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7716
/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte;

  /* Gather bit 0 of each byte into result bits 0..3.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7725
/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;	/* Exponent/fraction bits.  */
  unsigned sign = (imm >> 24) & 0x80;	/* Sign bit into bit 7.  */

  return sign | low7;
}
7733
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   encoded.  The cases are ordered: larger element sizes are tried
   before falling through to smaller ones.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediate: 32-bit only, and never for the MVN-style
     (OP == 1) form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern whose bytes are each all-zeros or all-ones.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only usable when both 32-bit
	 halves match; fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One byte placed in any of the four byte positions.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte followed by a run of ones ("shifted-ones" forms).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a 16-bit element if the two halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One byte in either halfword position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try an 8-bit element if the two bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7843
7844 #if defined BFD_HOST_64_BIT
7845 /* Returns TRUE if double precision value V may be cast
7846 to single precision without loss of accuracy. */
7847
7848 static bfd_boolean
7849 is_double_a_single (bfd_int64_t v)
7850 {
7851 int exp = (int)((v >> 52) & 0x7FF);
7852 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7853
7854 return (exp == 0 || exp == 0x7FF
7855 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7856 && (mantissa & 0x1FFFFFFFl) == 0;
7857 }
7858
7859 /* Returns a double precision value casted to single precision
7860 (ignoring the least significant bits in exponent and mantissa). */
7861
7862 static int
7863 double_to_single (bfd_int64_t v)
7864 {
7865 int sign = (int) ((v >> 63) & 1l);
7866 int exp = (int) ((v >> 52) & 0x7FF);
7867 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7868
7869 if (exp == 0x7FF)
7870 exp = 0xFF;
7871 else
7872 {
7873 exp = exp - 1023 + 127;
7874 if (exp >= 0xFF)
7875 {
7876 /* Infinity. */
7877 exp = 0x7F;
7878 mantissa = 0;
7879 }
7880 else if (exp < 0)
7881 {
7882 /* No denormalized numbers. */
7883 exp = 0;
7884 mantissa = 0;
7885 }
7886 }
7887 mantissa >>= 29;
7888 return (sign << 31) | (exp << 23) | mantissa;
7889 }
7890 #endif /* BFD_HOST_64_BIT */
7891
/* Kind of "=constant" literal being loaded; selects which move
   instructions move_or_literal_pool may substitute.  */
enum lit_type
{
  CONST_THUMB,	/* Literal for a Thumb load.  */
  CONST_ARM,	/* Literal for an ARM load.  */
  CONST_VEC	/* Literal for a VFP/Neon load.  */
};
7898
7899 static void do_vfp_nsyn_opcode (const char *);
7900
7901 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7902 Determine whether it can be performed with a move instruction; if
7903 it can, convert inst.instruction to that move instruction and
7904 return TRUE; if it can't, convert inst.instruction to a literal-pool
7905 load and return FALSE. If this is not a valid thing to do in the
7906 current context, set inst.error and return TRUE.
7907
7908 inst.operands[i] describes the destination register. */
7909
7910 static bfd_boolean
7911 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7912 {
7913 unsigned long tbit;
7914 bfd_boolean thumb_p = (t == CONST_THUMB);
7915 bfd_boolean arm_p = (t == CONST_ARM);
7916
7917 if (thumb_p)
7918 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7919 else
7920 tbit = LOAD_BIT;
7921
7922 if ((inst.instruction & tbit) == 0)
7923 {
7924 inst.error = _("invalid pseudo operation");
7925 return TRUE;
7926 }
7927
7928 if (inst.reloc.exp.X_op != O_constant
7929 && inst.reloc.exp.X_op != O_symbol
7930 && inst.reloc.exp.X_op != O_big)
7931 {
7932 inst.error = _("constant expression expected");
7933 return TRUE;
7934 }
7935
7936 if (inst.reloc.exp.X_op == O_constant
7937 || inst.reloc.exp.X_op == O_big)
7938 {
7939 #if defined BFD_HOST_64_BIT
7940 bfd_int64_t v;
7941 #else
7942 offsetT v;
7943 #endif
7944 if (inst.reloc.exp.X_op == O_big)
7945 {
7946 LITTLENUM_TYPE w[X_PRECISION];
7947 LITTLENUM_TYPE * l;
7948
7949 if (inst.reloc.exp.X_add_number == -1)
7950 {
7951 gen_to_words (w, X_PRECISION, E_PRECISION);
7952 l = w;
7953 /* FIXME: Should we check words w[2..5] ? */
7954 }
7955 else
7956 l = generic_bignum;
7957
7958 #if defined BFD_HOST_64_BIT
7959 v =
7960 ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7961 << LITTLENUM_NUMBER_OF_BITS)
7962 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
7963 << LITTLENUM_NUMBER_OF_BITS)
7964 | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
7965 << LITTLENUM_NUMBER_OF_BITS)
7966 | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
7967 #else
7968 v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
7969 | (l[0] & LITTLENUM_MASK);
7970 #endif
7971 }
7972 else
7973 v = inst.reloc.exp.X_add_number;
7974
7975 if (!inst.operands[i].issingle)
7976 {
7977 if (thumb_p)
7978 {
7979 /* LDR should not use lead in a flag-setting instruction being
7980 chosen so we do not check whether movs can be used. */
7981
7982 if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
7983 || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
7984 && inst.operands[i].reg != 13
7985 && inst.operands[i].reg != 15)
7986 {
7987 /* Check if on thumb2 it can be done with a mov.w, mvn or
7988 movw instruction. */
7989 unsigned int newimm;
7990 bfd_boolean isNegated;
7991
7992 newimm = encode_thumb32_immediate (v);
7993 if (newimm != (unsigned int) FAIL)
7994 isNegated = FALSE;
7995 else
7996 {
7997 newimm = encode_thumb32_immediate (~v);
7998 if (newimm != (unsigned int) FAIL)
7999 isNegated = TRUE;
8000 }
8001
8002 /* The number can be loaded with a mov.w or mvn
8003 instruction. */
8004 if (newimm != (unsigned int) FAIL
8005 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
8006 {
8007 inst.instruction = (0xf04f0000 /* MOV.W. */
8008 | (inst.operands[i].reg << 8));
8009 /* Change to MOVN. */
8010 inst.instruction |= (isNegated ? 0x200000 : 0);
8011 inst.instruction |= (newimm & 0x800) << 15;
8012 inst.instruction |= (newimm & 0x700) << 4;
8013 inst.instruction |= (newimm & 0x0ff);
8014 return TRUE;
8015 }
8016 /* The number can be loaded with a movw instruction. */
8017 else if ((v & ~0xFFFF) == 0
8018 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8019 {
8020 int imm = v & 0xFFFF;
8021
8022 inst.instruction = 0xf2400000; /* MOVW. */
8023 inst.instruction |= (inst.operands[i].reg << 8);
8024 inst.instruction |= (imm & 0xf000) << 4;
8025 inst.instruction |= (imm & 0x0800) << 15;
8026 inst.instruction |= (imm & 0x0700) << 4;
8027 inst.instruction |= (imm & 0x00ff);
8028 return TRUE;
8029 }
8030 }
8031 }
8032 else if (arm_p)
8033 {
8034 int value = encode_arm_immediate (v);
8035
8036 if (value != FAIL)
8037 {
8038 /* This can be done with a mov instruction. */
8039 inst.instruction &= LITERAL_MASK;
8040 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
8041 inst.instruction |= value & 0xfff;
8042 return TRUE;
8043 }
8044
8045 value = encode_arm_immediate (~ v);
8046 if (value != FAIL)
8047 {
8048 /* This can be done with a mvn instruction. */
8049 inst.instruction &= LITERAL_MASK;
8050 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8051 inst.instruction |= value & 0xfff;
8052 return TRUE;
8053 }
8054 }
8055 else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8056 {
8057 int op = 0;
8058 unsigned immbits = 0;
8059 unsigned immlo = inst.operands[1].imm;
8060 unsigned immhi = inst.operands[1].regisimm
8061 ? inst.operands[1].reg
8062 : inst.reloc.exp.X_unsigned
8063 ? 0
8064 : ((bfd_int64_t)((int) immlo)) >> 32;
8065 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8066 &op, 64, NT_invtype);
8067
8068 if (cmode == FAIL)
8069 {
8070 neon_invert_size (&immlo, &immhi, 64);
8071 op = !op;
8072 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8073 &op, 64, NT_invtype);
8074 }
8075
8076 if (cmode != FAIL)
8077 {
8078 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8079 | (1 << 23)
8080 | (cmode << 8)
8081 | (op << 5)
8082 | (1 << 4);
8083
8084 /* Fill other bits in vmov encoding for both thumb and arm. */
8085 if (thumb_mode)
8086 inst.instruction |= (0x7U << 29) | (0xF << 24);
8087 else
8088 inst.instruction |= (0xFU << 28) | (0x1 << 25);
8089 neon_write_immbits (immbits);
8090 return TRUE;
8091 }
8092 }
8093 }
8094
8095 if (t == CONST_VEC)
8096 {
8097 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8098 if (inst.operands[i].issingle
8099 && is_quarter_float (inst.operands[1].imm)
8100 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8101 {
8102 inst.operands[1].imm =
8103 neon_qfloat_bits (v);
8104 do_vfp_nsyn_opcode ("fconsts");
8105 return TRUE;
8106 }
8107
8108 /* If our host does not support a 64-bit type then we cannot perform
8109 the following optimization. This mean that there will be a
8110 discrepancy between the output produced by an assembler built for
8111 a 32-bit-only host and the output produced from a 64-bit host, but
8112 this cannot be helped. */
8113 #if defined BFD_HOST_64_BIT
8114 else if (!inst.operands[1].issingle
8115 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8116 {
8117 if (is_double_a_single (v)
8118 && is_quarter_float (double_to_single (v)))
8119 {
8120 inst.operands[1].imm =
8121 neon_qfloat_bits (double_to_single (v));
8122 do_vfp_nsyn_opcode ("fconstd");
8123 return TRUE;
8124 }
8125 }
8126 #endif
8127 }
8128 }
8129
8130 if (add_to_lit_pool ((!inst.operands[i].isvec
8131 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
8132 return TRUE;
8133
8134 inst.operands[1].reg = REG_PC;
8135 inst.operands[1].isreg = 1;
8136 inst.operands[1].preind = 1;
8137 inst.reloc.pc_rel = 1;
8138 inst.reloc.type = (thumb_p
8139 ? BFD_RELOC_ARM_THUMB_OFFSET
8140 : (mode_3
8141 ? BFD_RELOC_ARM_HWLITERAL
8142 : BFD_RELOC_ARM_LITERAL));
8143 return FALSE;
8144 }
8145
8146 /* inst.operands[i] was set up by parse_address. Encode it into an
8147 ARM-format instruction. Reject all forms which cannot be encoded
8148 into a coprocessor load/store instruction. If wb_ok is false,
8149 reject use of writeback; if unind_ok is false, reject use of
8150 unindexed addressing. If reloc_override is not 0, use it instead
8151 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8152 (in which case it is preserved). */
8153
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a literal operand is only acceptable when the
	 destination is a VFP/Neon register; try to satisfy it via a
	 literal-pool entry (or a vmov).  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
      /* Otherwise the operand was rewritten as a PC-relative
	 reference; fall through and encode it normally.  */
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The 8-bit option field goes in the low byte; U must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; any other relocation is replaced by the default
     coprocessor offset relocation for the current instruction set.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8222
8223 /* Functions for instruction encoding, sorted by sub-architecture.
8224 First some generics; their names are taken from the conventional
8225 bit positions for register arguments in ARM format instructions. */
8226
/* Encoder for instructions that take no operands: the opcode bits
   taken from the opcode table are already complete.  */
static void
do_noargs (void)
{
}
8231
8232 static void
8233 do_rd (void)
8234 {
8235 inst.instruction |= inst.operands[0].reg << 12;
8236 }
8237
8238 static void
8239 do_rn (void)
8240 {
8241 inst.instruction |= inst.operands[0].reg << 16;
8242 }
8243
8244 static void
8245 do_rd_rm (void)
8246 {
8247 inst.instruction |= inst.operands[0].reg << 12;
8248 inst.instruction |= inst.operands[1].reg;
8249 }
8250
8251 static void
8252 do_rm_rn (void)
8253 {
8254 inst.instruction |= inst.operands[0].reg;
8255 inst.instruction |= inst.operands[1].reg << 16;
8256 }
8257
8258 static void
8259 do_rd_rn (void)
8260 {
8261 inst.instruction |= inst.operands[0].reg << 12;
8262 inst.instruction |= inst.operands[1].reg << 16;
8263 }
8264
8265 static void
8266 do_rn_rd (void)
8267 {
8268 inst.instruction |= inst.operands[0].reg << 16;
8269 inst.instruction |= inst.operands[1].reg << 12;
8270 }
8271
8272 static void
8273 do_tt (void)
8274 {
8275 inst.instruction |= inst.operands[0].reg << 8;
8276 inst.instruction |= inst.operands[1].reg << 16;
8277 }
8278
8279 static bfd_boolean
8280 check_obsolete (const arm_feature_set *feature, const char *msg)
8281 {
8282 if (ARM_CPU_IS_ANY (cpu_variant))
8283 {
8284 as_tsktsk ("%s", msg);
8285 return TRUE;
8286 }
8287 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8288 {
8289 as_bad ("%s", msg);
8290 return TRUE;
8291 }
8292
8293 return FALSE;
8294 }
8295
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), enforcing the extra
   operand restrictions and deprecation rules of SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8319
8320 static void
8321 do_rd_rn_rm (void)
8322 {
8323 inst.instruction |= inst.operands[0].reg << 12;
8324 inst.instruction |= inst.operands[1].reg << 16;
8325 inst.instruction |= inst.operands[2].reg;
8326 }
8327
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16).  The third operand is
   an address: it must not be the PC and must carry no offset.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any parsed offset expression must be exactly zero.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8340
8341 static void
8342 do_imm0 (void)
8343 {
8344 inst.instruction |= inst.operands[0].imm;
8345 }
8346
8347 static void
8348 do_rd_cpaddr (void)
8349 {
8350 inst.instruction |= inst.operands[0].reg << 12;
8351 encode_arm_cp_address (1, TRUE, TRUE, 0);
8352 }
8353
8354 /* ARM instructions, in alphabetical order by function name (except
8355 that wrapper functions appear immediately after the function they
8356 wrap). */
8357
8358 /* This is a pseudo-op of the form "adr rd, label" to be converted
8359 into a relative address of the form "add rd, pc, #label-.-8". */
8360
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;

  /* For a defined Thumb function symbol, add one so bit 0 of the
     computed address stays set, preserving the symbol's Thumb-ness.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8378
8379 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8380 into a relative address of the form:
8381 add rd, pc, #low(label-.-8)"
8382 add rd, rd, #high(label-.-8)" */
8383
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;

  /* For a defined Thumb function symbol, add one so bit 0 of the
     computed address stays set, preserving the symbol's Thumb-ness.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
8402
/* Encode a data-processing (arithmetic) instruction: Rd, Rn and a
   shifter operand.  */
static void
do_arit (void)
{
  /* Relocations from the Thumb-1 ALU group may not be used here.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: Rd doubles as the first source register.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8415
8416 static void
8417 do_barrier (void)
8418 {
8419 if (inst.operands[0].present)
8420 inst.instruction |= inst.operands[0].imm;
8421 else
8422 inst.instruction |= 0xf;
8423 }
8424
8425 static void
8426 do_bfc (void)
8427 {
8428 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8429 constraint (msb > 32, _("bit-field extends past end of register"));
8430 /* The instruction encoding stores the LSB and MSB,
8431 not the LSB and width. */
8432 inst.instruction |= inst.operands[0].reg << 12;
8433 inst.instruction |= inst.operands[1].imm << 7;
8434 inst.instruction |= (msb - 1) << 16;
8435 }
8436
8437 static void
8438 do_bfi (void)
8439 {
8440 unsigned int msb;
8441
8442 /* #0 in second position is alternative syntax for bfc, which is
8443 the same instruction but with REG_PC in the Rm field. */
8444 if (!inst.operands[1].isreg)
8445 inst.operands[1].reg = REG_PC;
8446
8447 msb = inst.operands[2].imm + inst.operands[3].imm;
8448 constraint (msb > 32, _("bit-field extends past end of register"));
8449 /* The instruction encoding stores the LSB and MSB,
8450 not the LSB and width. */
8451 inst.instruction |= inst.operands[0].reg << 12;
8452 inst.instruction |= inst.operands[1].reg;
8453 inst.instruction |= inst.operands[2].imm << 7;
8454 inst.instruction |= (msb - 1) << 16;
8455 }
8456
8457 static void
8458 do_bfx (void)
8459 {
8460 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8461 _("bit-field extends past end of register"));
8462 inst.instruction |= inst.operands[0].reg << 12;
8463 inst.instruction |= inst.operands[1].reg;
8464 inst.instruction |= inst.operands[2].imm << 7;
8465 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8466 }
8467
8468 /* ARM V5 breakpoint instruction (argument parse)
8469 BKPT <16 bit unsigned immediate>
8470 Instruction is not conditional.
8471 The bit pattern given in insns[] has the COND_ALWAYS condition,
8472 and it is an error if the caller tried to override that. */
8473
8474 static void
8475 do_bkpt (void)
8476 {
8477 /* Top 12 of 16 bits to bits 19:8. */
8478 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8479
8480 /* Bottom 4 of 16 bits to bits 3:0. */
8481 inst.instruction |= inst.operands[0].imm & 0xf;
8482 }
8483
/* Set up inst.reloc for a branch instruction.  DEFAULT_RELOC is used
   unless the operand carried an explicit '(plt)' or '(tlscall)'
   suffix, which forces the corresponding relocation instead.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8500
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later objects use the JUMP relocation for a plain
     branch; older objects use the generic branch relocation.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8511
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL gets the CALL relocation; a
	 conditional one is relocated as a plain jump (NOTE(review):
	 presumably because it must not be rewritten to BLX —
	 confirm against the linker's interworking handling).  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8527
8528 /* ARM V5 branch-link-exchange instruction (argument parse)
8529 BLX <target_addr> ie BLX(1)
8530 BLX{<condition>} <Rm> ie BLX(2)
8531 Unfortunately, there are two different opcodes for this mnemonic.
8532 So, the insns[].value is not used, and the code here zaps values
8533 into inst.instruction.
8534 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8535
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Switch to the BLX(1) immediate-form opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8559
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Without OBJ_ELF the assignment below is unconditional: V4BX
     relocations are only emitted for EABI v4+ ELF objects.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8583
8584
8585 /* ARM v5TEJ. Jump to Jazelle code. */
8586
8587 static void
8588 do_bxj (void)
8589 {
8590 if (inst.operands[0].reg == REG_PC)
8591 as_tsktsk (_("use of r15 in bxj is not really useful"));
8592
8593 inst.instruction |= inst.operands[0].reg;
8594 }
8595
8596 /* Co-processor data operation:
8597 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8598 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8599 static void
8600 do_cdp (void)
8601 {
8602 inst.instruction |= inst.operands[0].reg << 8;
8603 inst.instruction |= inst.operands[1].imm << 20;
8604 inst.instruction |= inst.operands[2].reg << 12;
8605 inst.instruction |= inst.operands[3].reg << 16;
8606 inst.instruction |= inst.operands[4].reg;
8607 inst.instruction |= inst.operands[5].imm << 5;
8608 }
8609
8610 static void
8611 do_cmp (void)
8612 {
8613 inst.instruction |= inst.operands[0].reg << 16;
8614 encode_arm_shifter_operand (1);
8615 }
8616
8617 /* Transfer between coprocessor and ARM registers.
8618 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8619 MRC2
8620 MCR{cond}
8621 MCR2
8622
8623 No special properties. */
8624
/* Description of one coprocessor register whose access is deprecated
   (and possibly obsoleted) on some architecture.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where obsoleted.  */
  const char *dep_msg;		/* Diagnostic when deprecated.  */
  const char *obs_msg;		/* Diagnostic when obsoleted.  */
};
8637
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Note that no entry
   is currently obsoleted: every obsoleted field is ARM_ARCH_NONE and
   every obs_msg is NULL.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1, 0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0, 0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8665
/* Encode MRC/MRC2/MCR/MCR2, checking register legality for the
   current instruction set and warning about deprecated coprocessor
   register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if the register access matches a deprecated-register entry.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8715
8716 /* Transfer between coprocessor register and pair of ARM registers.
8717 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8718 MCRR2
8719 MRRC{cond}
8720 MRRC2
8721
8722 Two XScale instructions are special cases of these:
8723
8724 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8725 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8726
8727 Result unpredictable if Rd or Rn is R15. */
8728
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Coprocessor number in 11:8, opcode in 7:4, CRm in 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8762
8763 static void
8764 do_cpsi (void)
8765 {
8766 inst.instruction |= inst.operands[0].imm << 6;
8767 if (inst.operands[1].present)
8768 {
8769 inst.instruction |= CPSI_MMOD;
8770 inst.instruction |= inst.operands[1].imm;
8771 }
8772 }
8773
8774 static void
8775 do_dbg (void)
8776 {
8777 inst.instruction |= inst.operands[0].imm;
8778 }
8779
8780 static void
8781 do_div (void)
8782 {
8783 unsigned Rd, Rn, Rm;
8784
8785 Rd = inst.operands[0].reg;
8786 Rn = (inst.operands[1].present
8787 ? inst.operands[1].reg : Rd);
8788 Rm = inst.operands[2].reg;
8789
8790 constraint ((Rd == REG_PC), BAD_PC);
8791 constraint ((Rn == REG_PC), BAD_PC);
8792 constraint ((Rm == REG_PC), BAD_PC);
8793
8794 inst.instruction |= Rd << 16;
8795 inst.instruction |= Rn << 0;
8796 inst.instruction |= Rm << 8;
8797 }
8798
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* The pseudo-instruction emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the then/else mask (low 4 opcode bits) and condition
	 for validation of the following instructions; bit 4 appears
	 to act as a sentinel — see the Thumb IT handling.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8815
8816 /* If there is only one register in the register list,
8817 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  int lowest = ffs (range) - 1;

  if (lowest > 15 || range != (1 << lowest))
    return -1;

  return lowest;
}
8824
/* Common encoder for LDM/STM and their PUSH/POP aliases.  When
   FROM_PUSH_POP_MNEM is non-zero and the register list holds exactly
   one register, the single-register A2 (LDR/STR-based) encoding is
   substituted for the list form.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register-list bit mask.  */
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* The register-list operand's writeback flag selects the LDM/STM
     type 2/3 (user-bank / exception-return) form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field and rebuild the opcode.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8875
8876 static void
8877 do_ldmstm (void)
8878 {
8879 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8880 }
8881
8882 /* ARMv5TE load-consecutive (argument parse)
8883 Mode is like LDRH.
8884
8885 LDRccD R, mode
8886 STRccD R, mode. */
8887
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 is rejected because the implied second register would be r15.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8923
static void
do_ldrex (void)
{
  /* The address operand must be a bare [Rn]: pre-indexed, no offset,
     no writeback, no shift, and Rn may not be the PC.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup needed: the (zero) offset was fully checked above.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8955
8956 static void
8957 do_ldrexd (void)
8958 {
8959 constraint (inst.operands[0].reg % 2 != 0,
8960 _("even register required"));
8961 constraint (inst.operands[1].present
8962 && inst.operands[1].reg != inst.operands[0].reg + 1,
8963 _("can only load two consecutive registers"));
8964 /* If op 1 were present and equal to PC, this function wouldn't
8965 have been called in the first place. */
8966 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8967
8968 inst.instruction |= inst.operands[0].reg << 12;
8969 inst.instruction |= inst.operands[2].reg << 16;
8970 }
8971
8972 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8973 which is not a multiple of four is UNPREDICTABLE. */
8974 static void
8975 check_ldr_r15_aligned (void)
8976 {
8977 constraint (!(inst.operands[1].immisreg)
8978 && (inst.operands[0].reg == REG_PC
8979 && inst.operands[1].reg == REG_PC
8980 && (inst.reloc.exp.X_add_number & 0x3)),
8981 _("ldr to register 15 must be 4-byte alligned"));
8982 }
8983
8984 static void
8985 do_ldst (void)
8986 {
8987 inst.instruction |= inst.operands[0].reg << 12;
8988 if (!inst.operands[1].isreg)
8989 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8990 return;
8991 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8992 check_ldr_r15_aligned ();
8993 }
8994
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed operand is only acceptable when it is really a
	 bare [Rn]; rewrite it as post-indexed with writeback.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9013
9014 /* Halfword and signed-byte load/store operations. */
9015
9016 static void
9017 do_ldstv4 (void)
9018 {
9019 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9020 inst.instruction |= inst.operands[0].reg << 12;
9021 if (!inst.operands[1].isreg)
9022 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
9023 return;
9024 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
9025 }
9026
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed operand is only acceptable when it is really a
	 bare [Rn]; rewrite it as post-indexed with writeback.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9045
9046 /* Co-processor register load/store.
9047 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9048 static void
9049 do_lstc (void)
9050 {
9051 inst.instruction |= inst.operands[0].reg << 8;
9052 inst.instruction |= inst.operands[1].reg << 12;
9053 encode_arm_cp_address (2, TRUE, TRUE, 0);
9054 }
9055
9056 static void
9057 do_mlas (void)
9058 {
9059 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9060 if (inst.operands[0].reg == inst.operands[1].reg
9061 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9062 && !(inst.instruction & 0x00400000))
9063 as_tsktsk (_("Rd and Rm should be different in mla"));
9064
9065 inst.instruction |= inst.operands[0].reg << 16;
9066 inst.instruction |= inst.operands[1].reg;
9067 inst.instruction |= inst.operands[2].reg << 8;
9068 inst.instruction |= inst.operands[3].reg << 12;
9069 }
9070
/* Encode MOV Rd, <shifter operand>.  */
static void
do_mov (void)
{
  /* Relocations from the Thumb-1 ALU group may not be used here.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9080
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT ("top") from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* A constant immediate can be encoded directly; otherwise a fixup
     fills the fields in later.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9102
/* Handle the VFP-register forms of "mrs".  Returns SUCCESS when the
   instruction was emitted here (as fmstat or fmrx), FAIL to let the
   caller fall back to the core MRS encoding.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* Vector/system-register destination form: the source must be
	 FPSCR (register number 1).  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* Clear both operands: fmstat takes none.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}

/* Likewise for "msr": emit fmxr when the destination is a VFP system
   register, FAIL otherwise.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
9132
/* VMRS{cond} Rt, <spec_reg>.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid transfer register in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}

/* VMSR{cond} <spec_reg>, Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* In Thumb state delegate the SP/PC check to reject_bad_reg; in ARM
     state only PC is disallowed.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9175
9176 static void
9177 do_mrs (void)
9178 {
9179 unsigned br;
9180
9181 if (do_vfp_nsyn_mrs () == SUCCESS)
9182 return;
9183
9184 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9185 inst.instruction |= inst.operands[0].reg << 12;
9186
9187 if (inst.operands[1].isreg)
9188 {
9189 br = inst.operands[1].reg;
9190 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
9191 as_bad (_("bad register for mrs"));
9192 }
9193 else
9194 {
9195 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9196 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
9197 != (PSR_c|PSR_f),
9198 _("'APSR', 'CPSR' or 'SPSR' expected"));
9199 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
9200 }
9201
9202 inst.instruction |= br;
9203 }
9204
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* VFP system-register destinations are emitted as fmxr.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave encoding of the constant to the fixup
	 machinery via an ARM-immediate relocation.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9225
/* MUL{S} Rd, Rm {, Rs} — two-operand form means "mul Rd, Rm, Rd".  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Default the omitted third operand to Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Before v6 the destination should differ from the first source.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9241
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* RdLo in bits 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9266
/* NOP{<cond>} {#<imm>}.  On pre-v6K cores without an operand the
   opcode-table encoding is used unchanged.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9280
9281 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9282 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9283 Condition defaults to COND_ALWAYS.
9284 Error if Rd, Rn or Rm are R15. */
9285
9286 static void
9287 do_pkhbt (void)
9288 {
9289 inst.instruction |= inst.operands[0].reg << 12;
9290 inst.instruction |= inst.operands[1].reg << 16;
9291 inst.instruction |= inst.operands[2].reg;
9292 if (inst.operands[3].present)
9293 encode_arm_shift (3);
9294 }
9295
9296 /* ARM V6 PKHTB (Argument Parse). */
9297
9298 static void
9299 do_pkhtb (void)
9300 {
9301 if (!inst.operands[3].present)
9302 {
9303 /* If the shift specifier is omitted, turn the instruction
9304 into pkhbt rd, rm, rn. */
9305 inst.instruction &= 0xfff00010;
9306 inst.instruction |= inst.operands[0].reg << 12;
9307 inst.instruction |= inst.operands[1].reg;
9308 inst.instruction |= inst.operands[2].reg << 16;
9309 }
9310 else
9311 {
9312 inst.instruction |= inst.operands[0].reg << 12;
9313 inst.instruction |= inst.operands[1].reg << 16;
9314 inst.instruction |= inst.operands[2].reg;
9315 encode_arm_shift (3);
9316 }
9317 }
9318
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only a plain pre-indexed [Rn{, offset}] address is legal.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same address-mode restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9355
/* PUSH/POP {reglist}: rewritten as LDM/STM on SP! before encoding.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise the base
     operand: SP with writeback.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9368
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  /* Base register in bits 16-19; writeback is the only other
     variable field.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9382
9383 /* ARM V6 ssat (argument parse). */
9384
9385 static void
9386 do_ssat (void)
9387 {
9388 inst.instruction |= inst.operands[0].reg << 12;
9389 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9390 inst.instruction |= inst.operands[2].reg;
9391
9392 if (inst.operands[3].present)
9393 encode_arm_shift (3);
9394 }
9395
9396 /* ARM V6 usat (argument parse). */
9397
9398 static void
9399 do_usat (void)
9400 {
9401 inst.instruction |= inst.operands[0].reg << 12;
9402 inst.instruction |= inst.operands[1].imm << 16;
9403 inst.instruction |= inst.operands[2].reg;
9404
9405 if (inst.operands[3].present)
9406 encode_arm_shift (3);
9407 }
9408
9409 /* ARM V6 ssat16 (argument parse). */
9410
9411 static void
9412 do_ssat16 (void)
9413 {
9414 inst.instruction |= inst.operands[0].reg << 12;
9415 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9416 inst.instruction |= inst.operands[2].reg;
9417 }
9418
9419 static void
9420 do_usat16 (void)
9421 {
9422 inst.instruction |= inst.operands[0].reg << 12;
9423 inst.instruction |= inst.operands[1].imm << 16;
9424 inst.instruction |= inst.operands[2].reg;
9425 }
9426
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand selects BE: set bit 9.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9443
/* Shift instructions: Rd, {Rm,} <Rs|#imm>.  Rm defaults to Rd when
   omitted.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount is resolved through a fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9464
/* SMC (secure monitor call): the immediate is attached via a fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* HVC (hypervisor call): likewise encoded through a relocation.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

/* SVC/SWI: the comment field is filled in by the fixup machinery.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9485
/* SETPAN #<imm1>, ARM encoding: the PAN value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* SETPAN, Thumb encoding: the PAN value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9503
9504 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9505 SMLAxy{cond} Rd,Rm,Rs,Rn
9506 SMLAWy{cond} Rd,Rm,Rs,Rn
9507 Error if any register is R15. */
9508
9509 static void
9510 do_smla (void)
9511 {
9512 inst.instruction |= inst.operands[0].reg << 16;
9513 inst.instruction |= inst.operands[1].reg;
9514 inst.instruction |= inst.operands[2].reg << 8;
9515 inst.instruction |= inst.operands[3].reg << 12;
9516 }
9517
9518 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9519 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9520 Error if any register is R15.
9521 Warning if Rdlo == Rdhi. */
9522
9523 static void
9524 do_smlal (void)
9525 {
9526 inst.instruction |= inst.operands[0].reg << 12;
9527 inst.instruction |= inst.operands[1].reg << 16;
9528 inst.instruction |= inst.operands[2].reg;
9529 inst.instruction |= inst.operands[3].reg << 8;
9530
9531 if (inst.operands[0].reg == inst.operands[1].reg)
9532 as_tsktsk (_("rdhi and rdlo must be different"));
9533 }
9534
9535 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9536 SMULxy{cond} Rd,Rm,Rs
9537 Error if any register is R15. */
9538
9539 static void
9540 do_smul (void)
9541 {
9542 inst.instruction |= inst.operands[0].reg << 16;
9543 inst.instruction |= inst.operands[1].reg;
9544 inst.instruction |= inst.operands[2].reg << 8;
9545 }
9546
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; if present it
     must be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode number.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9568
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* The address operand must be a plain [Rn] with zero offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

/* Thumb strexb/strexh: same operand checks, Thumb field layout.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9609
/* ARM strexd: Rd, Rt, {Rt2,} [Rn] with Rt even and Rt2 == Rt + 1.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either stored register or
     the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9631
/* ARM V8 STLEX (store-release exclusive), ARM encoding.  */
static void
do_stlex (void)
{
  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* STLEX, Thumb encoding: same checks, different field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9650
9651 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9652 extends it to 32-bits, and adds the result to a value in another
9653 register. You can specify a rotation by 0, 8, 16, or 24 bits
9654 before extracting the 16-bit value.
9655 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9656 Condition defaults to COND_ALWAYS.
9657 Error if any register uses R15. */
9658
9659 static void
9660 do_sxtah (void)
9661 {
9662 inst.instruction |= inst.operands[0].reg << 12;
9663 inst.instruction |= inst.operands[1].reg << 16;
9664 inst.instruction |= inst.operands[2].reg;
9665 inst.instruction |= inst.operands[3].imm << 10;
9666 }
9667
9668 /* ARM V6 SXTH.
9669
9670 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9671 Condition defaults to COND_ALWAYS.
9672 Error if any register uses R15. */
9673
9674 static void
9675 do_sxth (void)
9676 {
9677 inst.instruction |= inst.operands[0].reg << 12;
9678 inst.instruction |= inst.operands[1].reg;
9679 inst.instruction |= inst.operands[2].imm << 10;
9680 }
9681 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision one-operand: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision two-operand: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Single-to-double conversion: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Double-to-single conversion: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Core register from single-precision register: Rd, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a consecutive SP register pair.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision register from core register: Sn, Rd.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Consecutive SP register pair from two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd, <address>.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd, <address>.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9767
9768
/* Multiple single-precision load/store.  LDSTM_TYPE selects the
   addressing mode; modes other than IA require base writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Register count goes in the low byte.  */
  inst.instruction |= inst.operands[1].imm;
}

/* Multiple double-precision load/store.  The X variants transfer an
   extra word per the FLDMX/FSTMX format.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X variants add one more.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9802
/* Entry points for the VFP load/store-multiple mnemonics; each simply
   selects the addressing-mode variant.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X forms use the odd-word-count encoding.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9838
/* Double-precision field encoders: the name suffix lists, in operand
   order, the instruction field each register goes into.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9881
/* VFPv3 instructions.  */

/* Float-constant immediate: high nibble to bits 16-19, low nibble to
   bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9898
/* Encode the fraction-bits field of a VFPv3 fixed-point conversion:
   the stored value is SRCSIZE minus the requested fraction bits, with
   its LSB in bit 5 and the remaining bits in bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9922
/* Fixed-point conversion entry points: encode the destination register
   and the fraction bits for a 16- or 32-bit fixed-point operand.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9950 \f
/* FPA instructions.  Also in a logical order.  */

/* Register compare: operand 0 to bits 16-19, operand 1 to bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9959
/* FPA load/store multiple.  The register count (1-4) is encoded in the
   two CP_T bits.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;	/* Count of 4 is encoded as both bits clear.  */
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per transferred register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-with-writeback becomes a post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9998 \f
9999 /* iWMMXt instructions: strictly in alphabetical order. */
10000
10001 static void
10002 do_iwmmxt_tandorc (void)
10003 {
10004 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
10005 }
10006
10007 static void
10008 do_iwmmxt_textrc (void)
10009 {
10010 inst.instruction |= inst.operands[0].reg << 12;
10011 inst.instruction |= inst.operands[1].imm;
10012 }
10013
10014 static void
10015 do_iwmmxt_textrm (void)
10016 {
10017 inst.instruction |= inst.operands[0].reg << 12;
10018 inst.instruction |= inst.operands[1].reg << 16;
10019 inst.instruction |= inst.operands[2].imm;
10020 }
10021
10022 static void
10023 do_iwmmxt_tinsr (void)
10024 {
10025 inst.instruction |= inst.operands[0].reg << 16;
10026 inst.instruction |= inst.operands[1].reg << 12;
10027 inst.instruction |= inst.operands[2].imm;
10028 }
10029
10030 static void
10031 do_iwmmxt_tmia (void)
10032 {
10033 inst.instruction |= inst.operands[0].reg << 5;
10034 inst.instruction |= inst.operands[1].reg;
10035 inst.instruction |= inst.operands[2].reg << 12;
10036 }
10037
10038 static void
10039 do_iwmmxt_waligni (void)
10040 {
10041 inst.instruction |= inst.operands[0].reg << 12;
10042 inst.instruction |= inst.operands[1].reg << 16;
10043 inst.instruction |= inst.operands[2].reg;
10044 inst.instruction |= inst.operands[3].imm << 20;
10045 }
10046
10047 static void
10048 do_iwmmxt_wmerge (void)
10049 {
10050 inst.instruction |= inst.operands[0].reg << 12;
10051 inst.instruction |= inst.operands[1].reg << 16;
10052 inst.instruction |= inst.operands[2].reg;
10053 inst.instruction |= inst.operands[3].imm << 21;
10054 }
10055
10056 static void
10057 do_iwmmxt_wmov (void)
10058 {
10059 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10060 inst.instruction |= inst.operands[0].reg << 12;
10061 inst.instruction |= inst.operands[1].reg << 16;
10062 inst.instruction |= inst.operands[1].reg;
10063 }
10064
/* Byte/halfword iWMMXt load/store: uses the scaled (_S2)
   coprocessor-offset relocations.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

/* Word-sized iWMMXt load/store, including control registers.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers take the unconditional (0xf)
	 encoding.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10090
/* Doubleword iWMMXt load/store.  iWMMXt2 adds a register-offset form
   with its own encoding; otherwise use the generic coprocessor
   address.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the opcode for the unconditional register-offset
	 encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10113
10114 static void
10115 do_iwmmxt_wshufh (void)
10116 {
10117 inst.instruction |= inst.operands[0].reg << 12;
10118 inst.instruction |= inst.operands[1].reg << 16;
10119 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10120 inst.instruction |= (inst.operands[2].imm & 0x0f);
10121 }
10122
10123 static void
10124 do_iwmmxt_wzero (void)
10125 {
10126 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10127 inst.instruction |= inst.operands[0].reg;
10128 inst.instruction |= inst.operands[0].reg << 12;
10129 inst.instruction |= inst.operands[0].reg << 16;
10130 }
10131
/* iWMMXt shift/rotate group: the third operand is either a register
   or, on iWMMXt2 only, a 5-bit immediate.  A zero immediate is
   rewritten to an equivalent full-width operation.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation size/kind.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10181 \f
10182 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10183 operations first, then control, shift, and load/store. */
10184
10185 /* Insns like "foo X,Y,Z". */
10186
10187 static void
10188 do_mav_triple (void)
10189 {
10190 inst.instruction |= inst.operands[0].reg << 16;
10191 inst.instruction |= inst.operands[1].reg;
10192 inst.instruction |= inst.operands[2].reg << 12;
10193 }
10194
10195 /* Insns like "foo W,X,Y,Z".
10196 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10197
10198 static void
10199 do_mav_quad (void)
10200 {
10201 inst.instruction |= inst.operands[0].reg << 5;
10202 inst.instruction |= inst.operands[1].reg << 12;
10203 inst.instruction |= inst.operands[2].reg << 16;
10204 inst.instruction |= inst.operands[3].reg;
10205 }
10206
10207 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10208 static void
10209 do_mav_dspsc (void)
10210 {
10211 inst.instruction |= inst.operands[1].reg << 12;
10212 }
10213
10214 /* Maverick shift immediate instructions.
10215 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10216 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10217
10218 static void
10219 do_mav_shift (void)
10220 {
10221 int imm = inst.operands[2].imm;
10222
10223 inst.instruction |= inst.operands[0].reg << 12;
10224 inst.instruction |= inst.operands[1].reg << 16;
10225
10226 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10227 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10228 Bit 4 should be 0. */
10229 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10230
10231 inst.instruction |= imm;
10232 }
10233 \f
10234 /* XScale instructions. Also sorted arithmetic before move. */
10235
10236 /* Xscale multiply-accumulate (argument parse)
10237 MIAcc acc0,Rm,Rs
10238 MIAPHcc acc0,Rm,Rs
10239 MIAxycc acc0,Rm,Rs. */
10240
10241 static void
10242 do_xsc_mia (void)
10243 {
10244 inst.instruction |= inst.operands[1].reg;
10245 inst.instruction |= inst.operands[2].reg << 12;
10246 }
10247
10248 /* Xscale move-accumulator-register (argument parse)
10249
10250 MARcc acc0,RdLo,RdHi. */
10251
10252 static void
10253 do_xsc_mar (void)
10254 {
10255 inst.instruction |= inst.operands[1].reg << 12;
10256 inst.instruction |= inst.operands[2].reg << 16;
10257 }
10258
10259 /* Xscale move-register-accumulator (argument parse)
10260
10261 MRAcc RdLo,RdHi,acc0. */
10262
10263 static void
10264 do_xsc_mra (void)
10265 {
10266 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10267 inst.instruction |= inst.operands[0].reg << 12;
10268 inst.instruction |= inst.operands[1].reg << 16;
10269 }
10270 \f
10271 /* Encoding functions relevant only to Thumb. */
10272
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* T32 data-processing encodings take only immediate shift amounts.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL/ROR allow 0-31; ASR/LSR additionally allow 32.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount is canonicalized to LSL #0; ASR/LSR #32 are
	 encoded with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The 5-bit amount is split: imm3 into bits 14-12, imm2 into
	 bits 7-6.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10307
10308
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] -- register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 fits in the two-bit shift field.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm] or [Rn, #imm]! -- immediate offset / pre-indexed.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm -- post-indexed; writeback is implied by the form.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10387
10388 /* Table of Thumb instructions which exist in both 16- and 32-bit
10389 encodings (the latter only in post-V6T2 cores). The index is the
10390 value used in the insns table below. When there is more than one
10391 possible 16-bit encoding for the instruction, this table always
10392 holds variant (1).
10393 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* Expand the table once as an enum of T_MNEM_* codes.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Expand the table a second time as the 16-bit opcode array, indexed
   by T_MNEM code via THUMB_OP16.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* And a third time as the 32-bit opcode array.  Bit 20 of the 32-bit
   opcode is the S (flag-setting) bit, tested by THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10493
10494 /* Thumb instruction encoders, in alphabetical order. */
10495
10496 /* ADDW or SUBW. */
10497
10498 static void
10499 do_t_add_sub_w (void)
10500 {
10501 int Rd, Rn;
10502
10503 Rd = inst.operands[0].reg;
10504 Rn = inst.operands[1].reg;
10505
10506 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10507 is the SP-{plus,minus}-immediate form of the instruction. */
10508 if (Rn == REG_SP)
10509 constraint (Rd == REG_PC, BAD_PC);
10510 else
10511 reject_bad_reg (Rd);
10512
10513 inst.instruction |= (Rn << 16) | (Rd << 8);
10514 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10515 }
10516
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* Writing the PC ends the instruction stream, so this must be the
     last insn of any IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block the flag-setting forms need the wide
	 encoding; outside one, the non-flag-setting forms do.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  /* ARMv8-A relaxes the SP-as-destination restriction.  */
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The ALU_ABS group relocs apply directly to the
		     16-bit form; others go through the usual Thumb
		     ADD reloc or relaxation.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide encoding needed, but the ALU_ABS group relocs
		 only exist for Thumb-1 forms.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Exception-return idiom: only SUBS PC, LR, #const.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so Rd is the repeated operand.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified syntax: only Thumb-1 encodings are available.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10737
/* Thumb ADR: pick between the relaxable 16-bit form, the 32-bit
   ADR.W, and the fixed 16-bit form, depending on syntax mode and any
   explicit size request.  */

static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* Addresses of Thumb functions carry the low (interworking) bit;
     add it here so the computed address matches.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
10777
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10866
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative, so either source may coincide with the
		 destination in the two-operand 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10967
10968 static void
10969 do_t_bfc (void)
10970 {
10971 unsigned Rd;
10972 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10973 constraint (msb > 32, _("bit-field extends past end of register"));
10974 /* The instruction encoding stores the LSB and MSB,
10975 not the LSB and width. */
10976 Rd = inst.operands[0].reg;
10977 reject_bad_reg (Rd);
10978 inst.instruction |= Rd << 8;
10979 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10980 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10981 inst.instruction |= msb - 1;
10982 }
10983
10984 static void
10985 do_t_bfi (void)
10986 {
10987 int Rd, Rn;
10988 unsigned int msb;
10989
10990 Rd = inst.operands[0].reg;
10991 reject_bad_reg (Rd);
10992
10993 /* #0 in second position is alternative syntax for bfc, which is
10994 the same instruction but with REG_PC in the Rm field. */
10995 if (!inst.operands[1].isreg)
10996 Rn = REG_PC;
10997 else
10998 {
10999 Rn = inst.operands[1].reg;
11000 reject_bad_reg (Rn);
11001 }
11002
11003 msb = inst.operands[2].imm + inst.operands[3].imm;
11004 constraint (msb > 32, _("bit-field extends past end of register"));
11005 /* The instruction encoding stores the LSB and MSB,
11006 not the LSB and width. */
11007 inst.instruction |= Rd << 8;
11008 inst.instruction |= Rn << 16;
11009 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11010 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11011 inst.instruction |= msb - 1;
11012 }
11013
11014 static void
11015 do_t_bfx (void)
11016 {
11017 unsigned Rd, Rn;
11018
11019 Rd = inst.operands[0].reg;
11020 Rn = inst.operands[1].reg;
11021
11022 reject_bad_reg (Rd);
11023 reject_bad_reg (Rn);
11024
11025 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11026 _("bit-field extends past end of register"));
11027 inst.instruction |= Rd << 8;
11028 inst.instruction |= Rn << 16;
11029 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11030 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11031 inst.instruction |= inst.operands[3].imm - 1;
11032 }
11033
11034 /* ARM V5 Thumb BLX (argument parse)
11035 BLX <target_addr> which is BLX(1)
11036 BLX <Rm> which is BLX(2)
11037 Unfortunately, there are two different opcodes for this mnemonic.
11038 So, the insns[].value is not used, and the code here zaps values
11039 into inst.instruction.
11040
11041 ??? How to take advantage of the additional two bits of displacement
11042 available in Thumb32 mode? Need new relocation? */
11043
11044 static void
11045 do_t_blx (void)
11046 {
11047 set_it_insn_type_last ();
11048
11049 if (inst.operands[0].isreg)
11050 {
11051 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11052 /* We have a register, so this is BLX(2). */
11053 inst.instruction |= inst.operands[0].reg << 3;
11054 }
11055 else
11056 {
11057 /* No register. This must be BLX(1). */
11058 inst.instruction = 0xf000e800;
11059 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11060 }
11061 }
11062
11063 static void
11064 do_t_branch (void)
11065 {
11066 int opcode;
11067 int cond;
11068 bfd_reloc_code_real_type reloc;
11069
11070 cond = inst.cond;
11071 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11072
11073 if (in_it_block ())
11074 {
11075 /* Conditional branches inside IT blocks are encoded as unconditional
11076 branches. */
11077 cond = COND_ALWAYS;
11078 }
11079 else
11080 cond = inst.cond;
11081
11082 if (cond != COND_ALWAYS)
11083 opcode = T_MNEM_bcond;
11084 else
11085 opcode = inst.instruction;
11086
11087 if (unified_syntax
11088 && (inst.size_req == 4
11089 || (inst.size_req != 2
11090 && (inst.operands[0].hasreloc
11091 || inst.reloc.exp.X_op == O_constant))))
11092 {
11093 inst.instruction = THUMB_OP32(opcode);
11094 if (cond == COND_ALWAYS)
11095 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11096 else
11097 {
11098 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11099 _("selected architecture does not support "
11100 "wide conditional branch instruction"));
11101
11102 gas_assert (cond != 0xF);
11103 inst.instruction |= cond << 22;
11104 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11105 }
11106 }
11107 else
11108 {
11109 inst.instruction = THUMB_OP16(opcode);
11110 if (cond == COND_ALWAYS)
11111 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11112 else
11113 {
11114 inst.instruction |= cond << 8;
11115 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11116 }
11117 /* Allow section relaxation. */
11118 if (unified_syntax && inst.size_req != 2)
11119 inst.relax = opcode;
11120 }
11121 inst.reloc.type = reloc;
11122 inst.reloc.pc_rel = 1;
11123 }
11124
/* Actually do the work for Thumb state bkpt and hlt.  The only difference
   between the two is the maximum immediate allowed - which is passed in
   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These insns execute regardless of any IT condition.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
11142
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11148
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11154
/* Thumb BL/BLX to a label (23-bit branch), with PLT and COFF
   interworking fix-ups.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11182
/* Thumb BX (register): Rm goes in bits 6-3.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11192
11193 static void
11194 do_t_bxj (void)
11195 {
11196 int Rm;
11197
11198 set_it_insn_type_last ();
11199 Rm = inst.operands[0].reg;
11200 reject_bad_reg (Rm);
11201 inst.instruction |= Rm << 16;
11202 }
11203
11204 static void
11205 do_t_clz (void)
11206 {
11207 unsigned Rd;
11208 unsigned Rm;
11209
11210 Rd = inst.operands[0].reg;
11211 Rm = inst.operands[1].reg;
11212
11213 reject_bad_reg (Rd);
11214 reject_bad_reg (Rm);
11215
11216 inst.instruction |= Rd << 8;
11217 inst.instruction |= Rm << 16;
11218 inst.instruction |= Rm;
11219 }
11220
/* Thumb CPS (change processor state): not permitted inside an IT
   block; the mode immediate is OR-ed straight into the opcode.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11227
/* Thumb CPSIE/CPSID.  Use the 32-bit encoding when a mode operand is
   present or a wide encoding was requested and the core supports it;
   otherwise fall back to the 16-bit form.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID distinction from the 16-bit opcode (bits 5-4)
	 into the imod field of the 32-bit CPS encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11255
11256 /* THUMB CPY instruction (argument parse). */
11257
11258 static void
11259 do_t_cpy (void)
11260 {
11261 if (inst.size_req == 4)
11262 {
11263 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11264 inst.instruction |= inst.operands[0].reg << 8;
11265 inst.instruction |= inst.operands[1].reg;
11266 }
11267 else
11268 {
11269 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11270 inst.instruction |= (inst.operands[0].reg & 0x7);
11271 inst.instruction |= inst.operands[1].reg << 3;
11272 }
11273 }
11274
11275 static void
11276 do_t_cbz (void)
11277 {
11278 set_it_insn_type (OUTSIDE_IT_INSN);
11279 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11280 inst.instruction |= inst.operands[0].reg;
11281 inst.reloc.pc_rel = 1;
11282 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11283 }
11284
11285 static void
11286 do_t_dbg (void)
11287 {
11288 inst.instruction |= inst.operands[0].imm;
11289 }
11290
11291 static void
11292 do_t_div (void)
11293 {
11294 unsigned Rd, Rn, Rm;
11295
11296 Rd = inst.operands[0].reg;
11297 Rn = (inst.operands[1].present
11298 ? inst.operands[1].reg : Rd);
11299 Rm = inst.operands[2].reg;
11300
11301 reject_bad_reg (Rd);
11302 reject_bad_reg (Rn);
11303 reject_bad_reg (Rm);
11304
11305 inst.instruction |= Rd << 8;
11306 inst.instruction |= Rn << 16;
11307 inst.instruction |= Rm;
11308 }
11309
11310 static void
11311 do_t_hint (void)
11312 {
11313 if (unified_syntax && inst.size_req == 4)
11314 inst.instruction = THUMB_OP32 (inst.instruction);
11315 else
11316 inst.instruction = THUMB_OP16 (inst.instruction);
11317 }
11318
/* Thumb IT instruction (argument parse).  Records the condition and
   then/else mask in NOW_IT so the following instructions can be
   validated against the IT block.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask marks the end of
	 the block; every then/else bit above it is flipped along with
	 the condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11361
/* Helper function used for both push/pop and ldm/stm.  Encodes the
   32-bit Thumb-2 form with base register BASE, register list MASK and
   the given WRITEBACK setting, diagnosing UNPREDICTABLE register
   lists.  A single-register list is rewritten as STR/LDR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the partially built opcode distinguishes loads from
     stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading the PC is a branch, so this must be the last
	       instruction of any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The single register's number goes in the Rt field (bits
	 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11425
/* Thumb LDM/STM (argument parse).  Selects a 16-bit encoding where one
   exists (including the PUSH/POP and STR/LDR rewrites), otherwise
   falls through to encode_thumb2_ldmstm for the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding implies writeback for stmia, and
		 for ldmia writeback iff the base is absent from the
		 register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based multiples become PUSH/POP (with writeback) or
		 SP-relative STR/LDR (single register, no writeback).  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-UAL syntax: only 16-bit ldmia/stmia with low registers.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11553
11554 static void
11555 do_t_ldrex (void)
11556 {
11557 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11558 || inst.operands[1].postind || inst.operands[1].writeback
11559 || inst.operands[1].immisreg || inst.operands[1].shifted
11560 || inst.operands[1].negative,
11561 BAD_ADDR_MODE);
11562
11563 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11564
11565 inst.instruction |= inst.operands[0].reg << 12;
11566 inst.instruction |= inst.operands[1].reg << 16;
11567 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11568 }
11569
11570 static void
11571 do_t_ldrexd (void)
11572 {
11573 if (!inst.operands[1].present)
11574 {
11575 constraint (inst.operands[0].reg == REG_LR,
11576 _("r14 not allowed as first register "
11577 "when second register is omitted"));
11578 inst.operands[1].reg = inst.operands[0].reg + 1;
11579 }
11580 constraint (inst.operands[0].reg == inst.operands[1].reg,
11581 BAD_OVERLAP);
11582
11583 inst.instruction |= inst.operands[0].reg << 12;
11584 inst.instruction |= inst.operands[1].reg << 8;
11585 inst.instruction |= inst.operands[2].reg << 16;
11586 }
11587
/* Thumb load/store single data item (LDR/STR and byte/halfword/signed
   variants, argument parse).  Picks the smallest encoding that can
   represent the operands, or records a relaxation opportunity so the
   16-bit form can be widened later.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A transfer whose first operand is the PC is a branch, so it must
     close any open IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand: try the literal-pool / mov expansion.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses use dedicated 16-bit
		     opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Record the opcode so relaxation can widen this later
		   if the offset does not fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-UAL syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- or SP-relative: word-sized only, with an immediate
	 offset.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset opcode into the matching
     register-offset one.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11774
11775 static void
11776 do_t_ldstd (void)
11777 {
11778 if (!inst.operands[1].present)
11779 {
11780 inst.operands[1].reg = inst.operands[0].reg + 1;
11781 constraint (inst.operands[0].reg == REG_LR,
11782 _("r14 not allowed here"));
11783 constraint (inst.operands[0].reg == REG_R12,
11784 _("r12 not allowed here"));
11785 }
11786
11787 if (inst.operands[2].writeback
11788 && (inst.operands[0].reg == inst.operands[2].reg
11789 || inst.operands[1].reg == inst.operands[2].reg))
11790 as_warn (_("base register written back, and overlaps "
11791 "one of transfer registers"));
11792
11793 inst.instruction |= inst.operands[0].reg << 12;
11794 inst.instruction |= inst.operands[1].reg << 8;
11795 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11796 }
11797
/* Thumb unprivileged load/store (LDRT/STRT family, argument parse).
   Rt goes in bits 12-15; the T-variant addressing mode is encoded by
   the helper.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11804
11805 static void
11806 do_t_mla (void)
11807 {
11808 unsigned Rd, Rn, Rm, Ra;
11809
11810 Rd = inst.operands[0].reg;
11811 Rn = inst.operands[1].reg;
11812 Rm = inst.operands[2].reg;
11813 Ra = inst.operands[3].reg;
11814
11815 reject_bad_reg (Rd);
11816 reject_bad_reg (Rn);
11817 reject_bad_reg (Rm);
11818 reject_bad_reg (Ra);
11819
11820 inst.instruction |= Rd << 8;
11821 inst.instruction |= Rn << 16;
11822 inst.instruction |= Rm;
11823 inst.instruction |= Ra << 12;
11824 }
11825
11826 static void
11827 do_t_mlal (void)
11828 {
11829 unsigned RdLo, RdHi, Rn, Rm;
11830
11831 RdLo = inst.operands[0].reg;
11832 RdHi = inst.operands[1].reg;
11833 Rn = inst.operands[2].reg;
11834 Rm = inst.operands[3].reg;
11835
11836 reject_bad_reg (RdLo);
11837 reject_bad_reg (RdHi);
11838 reject_bad_reg (Rn);
11839 reject_bad_reg (Rm);
11840
11841 inst.instruction |= RdLo << 12;
11842 inst.instruction |= RdHi << 8;
11843 inst.instruction |= Rn << 16;
11844 inst.instruction |= Rm;
11845 }
11846
/* Thumb MOV/MOVS/CMP (argument parse).  Handles narrow/wide encoding
   selection, expansion of register-shifted operands into shift
   instructions, and the pre-UAL restrictions.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing the PC is a branch, so this must end any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position: bit 8 for mov/movs, bit 16
	 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Provisional narrow/wide decision; refined below.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 group relocations must keep the 16-bit form;
		 otherwise record the reloc or a relax opportunity.  */
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift-register form has Rd == Rn.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Remaining narrow register-to-register cases.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-UAL syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12144
12145 static void
12146 do_t_mov16 (void)
12147 {
12148 unsigned Rd;
12149 bfd_vma imm;
12150 bfd_boolean top;
12151
12152 top = (inst.instruction & 0x00800000) != 0;
12153 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12154 {
12155 constraint (top, _(":lower16: not allowed in this instruction"));
12156 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12157 }
12158 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12159 {
12160 constraint (!top, _(":upper16: not allowed in this instruction"));
12161 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12162 }
12163
12164 Rd = inst.operands[0].reg;
12165 reject_bad_reg (Rd);
12166
12167 inst.instruction |= Rd << 8;
12168 if (inst.reloc.type == BFD_RELOC_UNUSED)
12169 {
12170 imm = inst.reloc.exp.X_add_number;
12171 inst.instruction |= (imm & 0xf000) << 4;
12172 inst.instruction |= (imm & 0x0800) << 15;
12173 inst.instruction |= (imm & 0x0700) << 4;
12174 inst.instruction |= (imm & 0x00ff);
12175 }
12176 }
12177
/* Thumb MVN/MVNS/TST/CMN and similar single-source operations with a
   register or immediate operand (argument parse).  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Destination field position: bit 8 for mvn/mvns, bit 16 for
	 the compare/test forms.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit low-register form with an
	 unshifted register operand is available.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12257
/* Thumb MRS (argument parse).  Operand 1 is either a banked register
   (isreg set) or a PSR-mask immediate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Let the VFP/Neon "mrs" aliases take precedence if they apply.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: validate the encoding, then place its
	 fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12305
/* Thumb MSR (argument parse).  Operand 0 is either a banked register
   (isreg set) or a PSR-mask immediate; operand 1 is the source
   register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Let the VFP/Neon "msr" aliases take precedence if they apply.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12352
/* Encode Thumb MUL/MULS.  Chooses between the 16-bit two-register
   form (which requires the destination to overlap a source) and the
   32-bit three-register Thumb-2 form.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* "mul Rd, Rn" is shorthand for "mul Rd, Rn, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Narrow encoding requires low registers, Rd overlapping a
	 source, and flag-setting behaviour that matches the IT
	 context (16-bit MUL inside IT does not set flags).  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      /* SP/PC are not valid in the wide encoding.  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12415
12416 static void
12417 do_t_mull (void)
12418 {
12419 unsigned RdLo, RdHi, Rn, Rm;
12420
12421 RdLo = inst.operands[0].reg;
12422 RdHi = inst.operands[1].reg;
12423 Rn = inst.operands[2].reg;
12424 Rm = inst.operands[3].reg;
12425
12426 reject_bad_reg (RdLo);
12427 reject_bad_reg (RdHi);
12428 reject_bad_reg (Rn);
12429 reject_bad_reg (Rm);
12430
12431 inst.instruction |= RdLo << 12;
12432 inst.instruction |= RdHi << 8;
12433 inst.instruction |= Rn << 16;
12434 inst.instruction |= Rm;
12435
12436 if (RdLo == RdHi)
12437 as_tsktsk (_("rdhi and rdlo must be different"));
12438 }
12439
/* Encode Thumb NOP and the hint instructions (operand 0, when
   present, is the hint number).  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;	/* mov r8, r8 — the classic T1 NOP.  */
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12472
/* Encode Thumb NEG/NEGS (rsb from zero).  Chooses the 16-bit form
   when both registers are low and the flag-setting behaviour matches
   the IT context.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* 16-bit NEGS sets flags outside IT; inside IT the 16-bit
	 form is the non-flag-setting conditional encoding.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12513
/* Encode Thumb-2 ORN (OR NOT), register or immediate form.  The
   second source defaults to the destination when omitted.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Switch to the immediate-form opcode and defer the modified
	 immediate to the fixup machinery.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12547
/* Encode Thumb-2 PKHBT (pack halfword, bottom+top) with an optional
   constant shift in operand 3.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount is split across imm3 (bits 14:12) and imm2
	 (bits 7:6) in the T32 encoding.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12573
/* Encode Thumb-2 PKHTB.  Without a shift it degenerates to PKHBT
   with the source registers exchanged, so clear the tb bit and swap
   before delegating.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12590
/* Encode Thumb-2 PLD/PLI.  Operand 0 is the address; when the offset
   is a register index it must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12599
/* Encode Thumb PUSH/POP.  Prefers a 16-bit encoding when the
   register list fits (low registers, plus LR for push / PC for pop);
   otherwise falls back to the 32-bit LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still
	 expressible in 16 bits via the PC/LR bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Wide form: LDMIA/STMDB with SP (r13) writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12632
12633 static void
12634 do_t_rbit (void)
12635 {
12636 unsigned Rd, Rm;
12637
12638 Rd = inst.operands[0].reg;
12639 Rm = inst.operands[1].reg;
12640
12641 reject_bad_reg (Rd);
12642 reject_bad_reg (Rm);
12643
12644 inst.instruction |= Rd << 8;
12645 inst.instruction |= Rm << 16;
12646 inst.instruction |= Rm;
12647 }
12648
/* Encode Thumb REV/REV16/REVSH — 16-bit when both registers are low,
   otherwise the wide Thumb-2 encoding (where Rm appears twice).  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* Rm is duplicated in bits 19:16 and 3:0 of the wide form.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12677
12678 static void
12679 do_t_rrx (void)
12680 {
12681 unsigned Rd, Rm;
12682
12683 Rd = inst.operands[0].reg;
12684 Rm = inst.operands[1].reg;
12685
12686 reject_bad_reg (Rd);
12687 reject_bad_reg (Rm);
12688
12689 inst.instruction |= Rd << 8;
12690 inst.instruction |= Rm;
12691 }
12692
/* Encode Thumb RSB (reverse subtract).  The immediate form "rsb Rd,
   Rs, #0" is rewritten as a 16-bit NEG when circumstances permit.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 set means the S (flag-setting) variant; 16-bit NEG
	 is only usable when that matches the IT context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Wide immediate form; defer the encoding of the modified
	     immediate to the fixup machinery.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12747
/* Encode Thumb SETEND.  Operand 0 is nonzero for big-endian (BE);
   the instruction is deprecated from ARMv8 onwards.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12759
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, immediate or
   register shift count).  Selects between four encodings: 16-bit
   register shift, 16-bit immediate shift, 32-bit shift, and (for wide
   immediate shifts) a MOV with a shifted operand.  */
static void
do_t_shift (void)
{
  /* "lsl Rd, #n" is shorthand for "lsl Rd, Rd, #n".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit forms set flags outside IT and don't inside; also
	 they need low registers, and ROR has no 16-bit immediate
	 encoding.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Wide immediate shifts are encoded as MOV/MOVS with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit encodings are available.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12907
12908 static void
12909 do_t_simd (void)
12910 {
12911 unsigned Rd, Rn, Rm;
12912
12913 Rd = inst.operands[0].reg;
12914 Rn = inst.operands[1].reg;
12915 Rm = inst.operands[2].reg;
12916
12917 reject_bad_reg (Rd);
12918 reject_bad_reg (Rn);
12919 reject_bad_reg (Rm);
12920
12921 inst.instruction |= Rd << 8;
12922 inst.instruction |= Rn << 16;
12923 inst.instruction |= Rm;
12924 }
12925
/* As do_t_simd, but for mnemonics whose assembly operand order is
   Rd, Rm, Rn — note operands 1 and 2 are read into Rm and Rn
   respectively before encoding.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12943
/* Encode Thumb SMC (secure monitor call) with a 16-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is fully encoded here; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12959
12960 static void
12961 do_t_hvc (void)
12962 {
12963 unsigned int value = inst.reloc.exp.X_add_number;
12964
12965 inst.reloc.type = BFD_RELOC_UNUSED;
12966 inst.instruction |= (value & 0x0fff);
12967 inst.instruction |= (value & 0xf000) << 4;
12968 }
12969
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is 1 for SSAT (whose
   saturate-to width is encoded as imm-1) and 0 for USAT.  Operand 3,
   if present, is an optional LSL/ASR shift of the source.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount split across imm3 (14:12) and imm2 (7:6).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13007
/* Encode Thumb-2 SSAT (signed saturate): width field is imm-1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13013
/* Encode Thumb-2 SSAT16 (signed saturate two halfwords); like SSAT
   the width is encoded as imm-1, but no shift operand exists.  */
static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}
13029
/* Encode Thumb-2 STREX.  Operand 2 is the base address, which must
   be a plain pre-indexed register with an optional constant offset
   (encoded via a U8 fixup).  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13046
/* Encode Thumb-2 STREXD.  The second data register defaults to the
   one after the first; the status register must not overlap any of
   the other operands.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13063
13064 static void
13065 do_t_sxtah (void)
13066 {
13067 unsigned Rd, Rn, Rm;
13068
13069 Rd = inst.operands[0].reg;
13070 Rn = inst.operands[1].reg;
13071 Rm = inst.operands[2].reg;
13072
13073 reject_bad_reg (Rd);
13074 reject_bad_reg (Rn);
13075 reject_bad_reg (Rm);
13076
13077 inst.instruction |= Rd << 8;
13078 inst.instruction |= Rn << 16;
13079 inst.instruction |= Rm;
13080 inst.instruction |= inst.operands[3].imm << 4;
13081 }
13082
/* Encode Thumb SXTH/SXTB/UXTH/UXTB.  Uses the 16-bit form for low
   registers with no rotation, otherwise the wide form with the
   optional rotation in bits 5:4.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means a 16-bit opcode template was
     selected and has not yet been widened.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13118
/* Encode Thumb SVC/SWI; the immediate is emitted via a fixup.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      /* Record that the OS extension was used, for build attributes.  */
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13135
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode
   template distinguishes the halfword form, which requires an
   "lsl #1" shifted index.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction of an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* From ARMv8 on, SP is permitted as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13158
/* Encode Thumb UDF (permanently undefined).  The immediate defaults
   to zero; values above 255 (or an explicit .w) force the 32-bit
   encoding.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13181
13182
/* Encode Thumb-2 USAT (unsigned saturate): width field is imm as-is.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13188
/* Encode Thumb-2 USAT16 (unsigned saturate two halfwords); unlike
   SSAT16 the width immediate is encoded unbiased.  */
static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}
13204
13205 /* Neon instruction encoder helpers. */
13206
13207 /* Encodings for the different types for various Neon opcodes. */
13208
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode table: the base encodings of an
   overloaded mnemonic for each of its operand flavours (N_INV where
   a flavour does not exist for that mnemonic).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13218
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X() row gives the integer, float-or-polynomial and
   scalar-or-immediate base encodings for one mnemonic; the same table
   is expanded below to build both the N_MNEM_* enum and the
   neon_enc_tab[] array, so the two stay in step by construction.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13296
/* N_MNEM_<opc> enumerators, one per NEON_ENC_TAB row; they index
   neon_enc_tab[] below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, expanded from the same NEON_ENC_TAB
   rows so it matches the enum ordering exactly.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13310
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one column of neon_enc_tab[] for the given opcode;
   the low 28 bits of X index the table, the top bits (where kept)
   carry condition/qualifier information.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding for the requested
   flavour of the current mnemonic, and mark the instruction as Neon
   so suffix checking (below) is satisfied.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a type suffix (e.g. ".i32") on a mnemonic that never went
   through NEON_ENCODE, i.e. a non-Neon instruction.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13346
13347 /* Define shapes for instruction operands. The following mnemonic characters
13348 are used in this table:
13349
13350 F - VFP S<n> register
13351 D - Neon D<n> register
13352 Q - Neon Q<n> register
13353 I - Immediate
13354 S - Scalar
13355 R - ARM register
13356 L - D<n> register list
13357
13358 This table is used to generate various data:
13359 - enumerations of the form NS_DDR to be used as arguments to
13360 neon_select_shape.
13361 - a table classifying shapes into single, double, quad, mixed.
13362 - a table used to drive neon_select_shape. */
13363
/* Master list of operand shapes.  Each X(N, L, C) entry gives the operand
   count N, the operand-kind list L, and the shape class C.  The list is
   expanded three times below: once to build the NS_* enumeration, once to
   build the class table, and once to build the shape-info table that drives
   neon_select_shape.  */
#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(4, (D, D, S, I), DOUBLE), \
  X(4, (Q, Q, S, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE), \
/* Half float shape supported so far. */\
  X (2, (H, D), MIXED), \
  X (2, (D, H), MIXED), \
  X (2, (H, F), MIXED), \
  X (2, (F, H), MIXED), \
  X (2, (H, H), HALF), \
  X (2, (H, R), HALF), \
  X (2, (R, H), HALF), \
  X (2, (H, I), HALF), \
  X (3, (H, H, H), HALF), \
  X (3, (H, F, I), MIXED), \
  X (3, (F, H, I), MIXED)

/* Paste the operand-kind letters together into NS_* enumerator names,
   e.g. X(3, (D, D, D), DOUBLE) becomes NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13436
/* Classification of a shape: which register class dominates it.  Used,
   e.g., by neon_quad to decide whether the Q bit should be set.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Shape class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13454
/* Kinds of element a shape operand can be (see the mnemonic-letter table
   above: H/F/D/Q/I/S/R/L).  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: not a register.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: register list, no single width.  */
};
13479
/* Full description of one shape: how many operands it has and the element
   kind of each.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Expand the operand-kind letters into SE_* array initializers.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape info for each shape, indexed by enum neon_shape; drives
   neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13501
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* NOTE: the modifier values below deliberately reuse the low type-bit
     values; they are only meaningful when N_EQK is also set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All the modifier bits that may accompany N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groups of allowed types.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13562
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   The variadic argument list is a sequence of candidate NS_* values,
   terminated by NS_NULL.  Side effect: a missing optional operand 1 is
   filled in from operand 0.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; stop at the first one that every
     present operand satisfies.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16, .16, .u16, .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32 s2, s2, #29:	NS_HFI
		 vcvt.f16.s32 s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked further here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13705
13706 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13707 means the Q bit should be set). */
13708
13709 static int
13710 neon_quad (enum neon_shape shape)
13711 {
13712 return neon_shape_class[shape] == SC_QUAD;
13713 }
13714
13715 static void
13716 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13717 unsigned *g_size)
13718 {
13719 /* Allow modification to be made to types which are constrained to be
13720 based on the key element, based on bits set alongside N_EQK. */
13721 if ((typebits & N_EQK) != 0)
13722 {
13723 if ((typebits & N_HLF) != 0)
13724 *g_size /= 2;
13725 else if ((typebits & N_DBL) != 0)
13726 *g_size *= 2;
13727 if ((typebits & N_SGN) != 0)
13728 *g_type = NT_signed;
13729 else if ((typebits & N_UNS) != 0)
13730 *g_type = NT_unsigned;
13731 else if ((typebits & N_INT) != 0)
13732 *g_type = NT_integer;
13733 else if ((typebits & N_FLT) != 0)
13734 *g_type = NT_float;
13735 else if ((typebits & N_SIZ) != 0)
13736 *g_type = NT_untyped;
13737 }
13738 }
13739
13740 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13741 operand type, i.e. the single type specified in a Neon instruction when it
13742 is the only one given. */
13743
13744 static struct neon_type_el
13745 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13746 {
13747 struct neon_type_el dest = *key;
13748
13749 gas_assert ((thisarg & N_EQK) != 0);
13750
13751 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13752
13753 return dest;
13754 }
13755
13756 /* Convert Neon type and size into compact bitmask representation. */
13757
13758 static enum neon_type_mask
13759 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13760 {
13761 switch (type)
13762 {
13763 case NT_untyped:
13764 switch (size)
13765 {
13766 case 8: return N_8;
13767 case 16: return N_16;
13768 case 32: return N_32;
13769 case 64: return N_64;
13770 default: ;
13771 }
13772 break;
13773
13774 case NT_integer:
13775 switch (size)
13776 {
13777 case 8: return N_I8;
13778 case 16: return N_I16;
13779 case 32: return N_I32;
13780 case 64: return N_I64;
13781 default: ;
13782 }
13783 break;
13784
13785 case NT_float:
13786 switch (size)
13787 {
13788 case 16: return N_F16;
13789 case 32: return N_F32;
13790 case 64: return N_F64;
13791 default: ;
13792 }
13793 break;
13794
13795 case NT_poly:
13796 switch (size)
13797 {
13798 case 8: return N_P8;
13799 case 16: return N_P16;
13800 case 64: return N_P64;
13801 default: ;
13802 }
13803 break;
13804
13805 case NT_signed:
13806 switch (size)
13807 {
13808 case 8: return N_S8;
13809 case 16: return N_S16;
13810 case 32: return N_S32;
13811 case 64: return N_S64;
13812 default: ;
13813 }
13814 break;
13815
13816 case NT_unsigned:
13817 switch (size)
13818 {
13819 case 8: return N_U8;
13820 case 16: return N_U16;
13821 case 32: return N_U32;
13822 case 64: return N_U64;
13823 default: ;
13824 }
13825 break;
13826
13827 default: ;
13828 }
13829
13830 return N_UTYP;
13831 }
13832
13833 /* Convert compact Neon bitmask type representation to a type and size. Only
13834 handles the case where a single bit is set in the mask. */
13835
13836 static int
13837 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13838 enum neon_type_mask mask)
13839 {
13840 if ((mask & N_EQK) != 0)
13841 return FAIL;
13842
13843 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13844 *size = 8;
13845 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13846 *size = 16;
13847 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13848 *size = 32;
13849 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13850 *size = 64;
13851 else
13852 return FAIL;
13853
13854 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13855 *type = NT_signed;
13856 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13857 *type = NT_unsigned;
13858 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13859 *type = NT_integer;
13860 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13861 *type = NT_untyped;
13862 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13863 *type = NT_poly;
13864 else if ((mask & (N_F_ALL)) != 0)
13865 *type = NT_float;
13866 else
13867 return FAIL;
13868
13869 return SUCCESS;
13870 }
13871
13872 /* Modify a bitmask of allowed types. This is only needed for type
13873 relaxation. */
13874
13875 static unsigned
13876 modify_types_allowed (unsigned allowed, unsigned mods)
13877 {
13878 unsigned size;
13879 enum neon_el_type type;
13880 unsigned destmask;
13881 int i;
13882
13883 destmask = 0;
13884
13885 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13886 {
13887 if (el_type_of_type_chk (&type, &size,
13888 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13889 {
13890 neon_modify_type_size (mods, &type, &size);
13891 destmask |= type_chk_of_el_type (type, size);
13892 }
13893 }
13894
13895 return destmask;
13896 }
13897
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of operands; the varargs are one N_* constraint mask per
   operand.  On error, records the error (via first_error / inst.error) and
   returns a type of NT_invtype.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type given after the mnemonic and types given on individual operands
     are mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key element's type and size; pass 1 checks every
     operand against the key (and against VFP register widths for N_VFP).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14107
14108 /* Neon-style VFP instruction forwarding. */
14109
14110 /* Thumb VFP instructions have 0xE in the condition field. */
14111
14112 static void
14113 do_vfp_cond_or_thumb (void)
14114 {
14115 inst.is_neon = 1;
14116
14117 if (thumb_mode)
14118 inst.instruction |= 0xe0000000;
14119 else
14120 inst.instruction |= inst.cond << 28;
14121 }
14122
14123 /* Look up and encode a simple mnemonic, for use as a helper function for the
14124 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14125 etc. It is assumed that operand parsing has already been done, and that the
14126 operands are in the form expected by the given opcode (this isn't necessarily
14127 the same as the form in which they were parsed, hence some massaging must
14128 take place before this function is called).
14129 Checks current arch version against that in the looked-up opcode. */
14130
14131 static void
14132 do_vfp_nsyn_opcode (const char *opname)
14133 {
14134 const struct asm_opcode *opcode;
14135
14136 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14137
14138 if (!opcode)
14139 abort ();
14140
14141 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14142 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14143 _(BAD_FPU));
14144
14145 inst.is_neon = 1;
14146
14147 if (thumb_mode)
14148 {
14149 inst.instruction = opcode->tvalue;
14150 opcode->tencode ();
14151 }
14152 else
14153 {
14154 inst.instruction = (inst.cond << 28) | opcode->avalue;
14155 opcode->aencode ();
14156 }
14157 }
14158
14159 static void
14160 do_vfp_nsyn_add_sub (enum neon_shape rs)
14161 {
14162 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14163
14164 if (rs == NS_FFF || rs == NS_HHH)
14165 {
14166 if (is_add)
14167 do_vfp_nsyn_opcode ("fadds");
14168 else
14169 do_vfp_nsyn_opcode ("fsubs");
14170
14171 /* ARMv8.2 fp16 instruction. */
14172 if (rs == NS_HHH)
14173 do_scalar_fp16_v82_encode ();
14174 }
14175 else
14176 {
14177 if (is_add)
14178 do_vfp_nsyn_opcode ("faddd");
14179 else
14180 do_vfp_nsyn_opcode ("fsubd");
14181 }
14182 }
14183
14184 /* Check operand types to see if this is a VFP instruction, and if so call
14185 PFN (). */
14186
14187 static int
14188 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14189 {
14190 enum neon_shape rs;
14191 struct neon_type_el et;
14192
14193 switch (args)
14194 {
14195 case 2:
14196 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14197 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14198 break;
14199
14200 case 3:
14201 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14202 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14203 N_F_ALL | N_KEY | N_VFP);
14204 break;
14205
14206 default:
14207 abort ();
14208 }
14209
14210 if (et.type != NT_invtype)
14211 {
14212 pfn (rs);
14213 return SUCCESS;
14214 }
14215
14216 inst.error = NULL;
14217 return FAIL;
14218 }
14219
14220 static void
14221 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14222 {
14223 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14224
14225 if (rs == NS_FFF || rs == NS_HHH)
14226 {
14227 if (is_mla)
14228 do_vfp_nsyn_opcode ("fmacs");
14229 else
14230 do_vfp_nsyn_opcode ("fnmacs");
14231
14232 /* ARMv8.2 fp16 instruction. */
14233 if (rs == NS_HHH)
14234 do_scalar_fp16_v82_encode ();
14235 }
14236 else
14237 {
14238 if (is_mla)
14239 do_vfp_nsyn_opcode ("fmacd");
14240 else
14241 do_vfp_nsyn_opcode ("fnmacd");
14242 }
14243 }
14244
14245 static void
14246 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14247 {
14248 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14249
14250 if (rs == NS_FFF || rs == NS_HHH)
14251 {
14252 if (is_fma)
14253 do_vfp_nsyn_opcode ("ffmas");
14254 else
14255 do_vfp_nsyn_opcode ("ffnmas");
14256
14257 /* ARMv8.2 fp16 instruction. */
14258 if (rs == NS_HHH)
14259 do_scalar_fp16_v82_encode ();
14260 }
14261 else
14262 {
14263 if (is_fma)
14264 do_vfp_nsyn_opcode ("ffmad");
14265 else
14266 do_vfp_nsyn_opcode ("ffnmad");
14267 }
14268 }
14269
14270 static void
14271 do_vfp_nsyn_mul (enum neon_shape rs)
14272 {
14273 if (rs == NS_FFF || rs == NS_HHH)
14274 {
14275 do_vfp_nsyn_opcode ("fmuls");
14276
14277 /* ARMv8.2 fp16 instruction. */
14278 if (rs == NS_HHH)
14279 do_scalar_fp16_v82_encode ();
14280 }
14281 else
14282 do_vfp_nsyn_opcode ("fmuld");
14283 }
14284
14285 static void
14286 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14287 {
14288 int is_neg = (inst.instruction & 0x80) != 0;
14289 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14290
14291 if (rs == NS_FF || rs == NS_HH)
14292 {
14293 if (is_neg)
14294 do_vfp_nsyn_opcode ("fnegs");
14295 else
14296 do_vfp_nsyn_opcode ("fabss");
14297
14298 /* ARMv8.2 fp16 instruction. */
14299 if (rs == NS_HH)
14300 do_scalar_fp16_v82_encode ();
14301 }
14302 else
14303 {
14304 if (is_neg)
14305 do_vfp_nsyn_opcode ("fnegd");
14306 else
14307 do_vfp_nsyn_opcode ("fabsd");
14308 }
14309 }
14310
14311 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14312 insns belong to Neon, and are handled elsewhere. */
14313
14314 static void
14315 do_vfp_nsyn_ldm_stm (int is_dbmode)
14316 {
14317 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14318 if (is_ldm)
14319 {
14320 if (is_dbmode)
14321 do_vfp_nsyn_opcode ("fldmdbs");
14322 else
14323 do_vfp_nsyn_opcode ("fldmias");
14324 }
14325 else
14326 {
14327 if (is_dbmode)
14328 do_vfp_nsyn_opcode ("fstmdbs");
14329 else
14330 do_vfp_nsyn_opcode ("fstmias");
14331 }
14332 }
14333
14334 static void
14335 do_vfp_nsyn_sqrt (void)
14336 {
14337 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14338 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14339
14340 if (rs == NS_FF || rs == NS_HH)
14341 {
14342 do_vfp_nsyn_opcode ("fsqrts");
14343
14344 /* ARMv8.2 fp16 instruction. */
14345 if (rs == NS_HH)
14346 do_scalar_fp16_v82_encode ();
14347 }
14348 else
14349 do_vfp_nsyn_opcode ("fsqrtd");
14350 }
14351
14352 static void
14353 do_vfp_nsyn_div (void)
14354 {
14355 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14356 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14357 N_F_ALL | N_KEY | N_VFP);
14358
14359 if (rs == NS_FFF || rs == NS_HHH)
14360 {
14361 do_vfp_nsyn_opcode ("fdivs");
14362
14363 /* ARMv8.2 fp16 instruction. */
14364 if (rs == NS_HHH)
14365 do_scalar_fp16_v82_encode ();
14366 }
14367 else
14368 do_vfp_nsyn_opcode ("fdivd");
14369 }
14370
14371 static void
14372 do_vfp_nsyn_nmul (void)
14373 {
14374 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14375 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14376 N_F_ALL | N_KEY | N_VFP);
14377
14378 if (rs == NS_FFF || rs == NS_HHH)
14379 {
14380 NEON_ENCODE (SINGLE, inst);
14381 do_vfp_sp_dyadic ();
14382
14383 /* ARMv8.2 fp16 instruction. */
14384 if (rs == NS_HHH)
14385 do_scalar_fp16_v82_encode ();
14386 }
14387 else
14388 {
14389 NEON_ENCODE (DOUBLE, inst);
14390 do_vfp_dp_rd_rn_rm ();
14391 }
14392 do_vfp_cond_or_thumb ();
14393
14394 }
14395
/* Encode VCMP/VCMPE.  A register second operand gives the plain compare
   form; an immediate second operand (compare with zero) rewrites the
   mnemonic to its compare-with-zero variant.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with zero: the immediate operand carries no type.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      /* Switch the template over to the corresponding -with-zero
	 pseudo-mnemonic.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14450
14451 static void
14452 nsyn_insert_sp (void)
14453 {
14454 inst.operands[1] = inst.operands[0];
14455 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14456 inst.operands[0].reg = REG_SP;
14457 inst.operands[0].isreg = 1;
14458 inst.operands[0].writeback = 1;
14459 inst.operands[0].present = 1;
14460 }
14461
14462 static void
14463 do_vfp_nsyn_push (void)
14464 {
14465 nsyn_insert_sp ();
14466
14467 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14468 _("register list must contain at least 1 and at most 16 "
14469 "registers"));
14470
14471 if (inst.operands[1].issingle)
14472 do_vfp_nsyn_opcode ("fstmdbs");
14473 else
14474 do_vfp_nsyn_opcode ("fstmdbd");
14475 }
14476
14477 static void
14478 do_vfp_nsyn_pop (void)
14479 {
14480 nsyn_insert_sp ();
14481
14482 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14483 _("register list must contain at least 1 and at most 16 "
14484 "registers"));
14485
14486 if (inst.operands[1].issingle)
14487 do_vfp_nsyn_opcode ("fldmias");
14488 else
14489 do_vfp_nsyn_opcode ("fldmiad");
14490 }
14491
14492 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14493 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14494
14495 static void
14496 neon_dp_fixup (struct arm_it* insn)
14497 {
14498 unsigned int i = insn->instruction;
14499 insn->is_neon = 1;
14500
14501 if (thumb_mode)
14502 {
14503 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14504 if (i & (1 << 24))
14505 i |= 1 << 28;
14506
14507 i &= ~(1 << 24);
14508
14509 i |= 0xef000000;
14510 }
14511 else
14512 i |= 0xf2000000;
14513
14514 insn->instruction = i;
14515 }
14516
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3), i.e. the two-bit size field used in Neon encodings.  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so 8/16/32/64
     map to 4/5/6/7; subtract 4 to get 0..3.  */
  int lowest_set = ffs (x);

  return (unsigned) (lowest_set - 4);
}
14525
/* Split a 5-bit Neon/VFP register number into the low-four-bit field and
   the single high bit, which live in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14528
14529 /* Encode insns with bit pattern:
14530
14531 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14532 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14533
14534 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14535 different meaning for some instruction. */
14536
14537 static void
14538 neon_three_same (int isquad, int ubit, int size)
14539 {
14540 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14541 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14542 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14543 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14544 inst.instruction |= LOW4 (inst.operands[2].reg);
14545 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14546 inst.instruction |= (isquad != 0) << 6;
14547 inst.instruction |= (ubit != 0) << 24;
14548 if (size != -1)
14549 inst.instruction |= neon_logbits (size) << 20;
14550
14551 neon_dp_fixup (&inst);
14552 }
14553
14554 /* Encode instructions of the form:
14555
14556 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14557 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14558
14559 Don't write size if SIZE == -1. */
14560
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Destination Vd as D:Rd (bit 22, bits 15-12).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Source Vm as M:Rm (bit 5, bits 3-0).  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* SIZE == -1: leave the size field as set by the opcode bitmask.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14576
14577 /* Neon instruction encoders, in approximate order of appearance. */
14578
14579 static void
14580 do_neon_dyadic_i_su (void)
14581 {
14582 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14583 struct neon_type_el et = neon_check_type (3, rs,
14584 N_EQK, N_EQK, N_SU_32 | N_KEY);
14585 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14586 }
14587
14588 static void
14589 do_neon_dyadic_i64_su (void)
14590 {
14591 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14592 struct neon_type_el et = neon_check_type (3, rs,
14593 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14594 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14595 }
14596
/* Encode a Neon immediate-shift instruction.  IMMBITS is the raw value for
   the imm6 field; the element size in bytes is spread across the L bit
   (bit 7) and the top of the immediate field (bits 21-19).  The U bit is
   written only when WRITE_UBIT is set, since some encodings reuse bit 24
   for other purposes.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Size in bytes: 1, 2, 4 or 8.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* L bit (bit 7) is set only for 64-bit elements (size == 8).  */
  inst.instruction |= (size >> 3) << 7;
  /* Remaining size bits land on top of the immediate field.  */
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14615
/* VSHL: either an immediate shift (second source is #imm) or the
   three-register form.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: vshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed).  Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14651
/* VQSHL: saturating shift left, immediate or three-register form.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form; signedness of the element type sets the U bit.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14681
14682 static void
14683 do_neon_rshl (void)
14684 {
14685 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14686 struct neon_type_el et = neon_check_type (3, rs,
14687 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14688 unsigned int tmp;
14689
14690 tmp = inst.operands[2].reg;
14691 inst.operands[2].reg = inst.operands[1].reg;
14692 inst.operands[1].reg = tmp;
14693 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14694 }
14695
/* Work out the cmode encoding for a VBIC/VORR-style immediate of element
   size SIZE, storing the 8-bit payload through IMMBITS.  Returns the cmode
   value, or FAIL (via first_error) if the immediate cannot be encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* One non-zero byte, in any of the four byte positions.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise the 32-bit value must be a repeated 16-bit pattern,
	 which we can fall through and try as a 16-bit immediate.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: non-zero byte in either position.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14751
/* Encode VAND/VBIC/VORR/VORN/VEOR etc.  The three-register forms are
   untyped bitwise operations; the immediate forms only exist for VBIC and
   VORR, with VAND and VORN assembled as pseudo-instructions by inverting
   the immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Two-operand (Vd, #imm) and three-operand (Vd, Vn, #imm) spellings
	 are both accepted; the immediate is in a different slot each way.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14837
14838 static void
14839 do_neon_bitfield (void)
14840 {
14841 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14842 neon_check_type (3, rs, N_IGNORE_TYPE);
14843 neon_three_same (neon_quad (rs), 0, -1);
14844 }
14845
/* Common encoder for dyadic operations accepting both integer and float
   element types.  TYPES is the allowed type mask, DESTBITS extra type
   bits for the destination.  For integer types, the U bit is set when the
   element type matches UBIT_MEANING.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only a 16-bit float needs the size field; 32-bit float encodings
	 carry their size in the opcode bitmask.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14864
/* Dyadic operation with signed/unsigned/float 32-bit element types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14870
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14878
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14886
/* Flags for vfp_or_neon_is_neon, selecting which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/fix up condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14893
14894 /* Call this function if an instruction which may have belonged to the VFP or
14895 Neon instruction sets, but turned out to be a Neon instruction (due to the
14896 operand types involved, etc.). We have to check and/or fix-up a couple of
14897 things:
14898
14899 - Make sure the user hasn't attempted to make a Neon instruction
14900 conditional.
14901 - Alter the value in the condition code field if necessary.
14902 - Make sure that the arch supports Neon instructions.
14903
14904 Which of these operations take place depends on bits from enum
14905 vfp_or_neon_is_neon_bits.
14906
14907 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14908 current instruction's condition is COND_ALWAYS, the condition field is
14909 changed to inst.uncond_value. This is necessary because instructions shared
14910 between VFP and Neon may be conditional for the VFP variants only, and the
14911 unconditional Neon version must have, e.g., 0xF in the condition field. */
14912
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Replace the always-condition with the unconditional encoding
	 (e.g. 0xF) recorded for this mnemonic, if any.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Architecture checks also record feature use for build attributes.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14944
/* VADD/VSUB: try the VFP form first; otherwise encode as Neon.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14958
14959 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14960 result to be:
14961 V<op> A,B (A is operand 0, B is operand 2)
14962 to mean:
14963 V<op> A,B,A
14964 not:
14965 V<op> A,B,B
14966 so handle that case specially. */
14967
14968 static void
14969 neon_exchange_operands (void)
14970 {
14971 if (inst.operands[1].present)
14972 {
14973 void *scratch = xmalloc (sizeof (inst.operands[0]));
14974
14975 /* Swap operands[1] and operands[2]. */
14976 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14977 inst.operands[1] = inst.operands[2];
14978 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14979 free (scratch);
14980 }
14981 else
14982 {
14983 inst.operands[1] = inst.operands[2];
14984 inst.operands[2] = inst.operands[0];
14985 }
14986 }
14987
/* Encode a Neon compare.  Register-register compares use the common
   three-same encoder (INVERT swaps the sources to synthesize the inverted
   condition); compares against #0 use a dedicated two-register format.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* F bit selects the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15015
/* VCGE/VCGT etc.: direct (non-inverted) compares.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15021
/* VCLE/VCLT etc.: inverted compares, encoded by swapping sources.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15027
/* VCEQ: equality compare (symmetric, so never inverted).  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15033
15034 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15035 scalars, which are encoded in 5 bits, M : Rm.
15036 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15037 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15038 index in M. */
15039
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15065
15066 /* Encode multiply / multiply-accumulate scalar instructions. */
15067
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Pack the scalar's register and element index into 5 bits (M:Rm);
     reports an error for out-of-range scalars.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* F bit for the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15090
/* VMLA/VMLS: VFP form, Neon by-scalar form, or Neon three-register
   form, in that order of preference.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15115
/* VFMA/VFMS: fused multiply-accumulate; VFP form first, else Neon.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15127
15128 static void
15129 do_neon_tst (void)
15130 {
15131 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15132 struct neon_type_el et = neon_check_type (3, rs,
15133 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15134 neon_three_same (neon_quad (rs), 0, et.size);
15135 }
15136
15137 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15138 same types as the MAC equivalents. The polynomial type for this instruction
15139 is encoded the same as the integer type. */
15140
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The scalar variant shares its encoding with VMLA/VMLS by-scalar.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15155
/* VQDMULH/VQRDMULH: saturating doubling multiply returning high half;
   by-scalar or three-register form.  Signed 16/32-bit elements only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15177
/* VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD): same operand shapes as VQDMULH,
   but gated on the v8.1 extension.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      /* Accept with a warning so -march=armv8-a users still assemble,
	 while recording the v8.1 build attribute.  */
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15209
15210 static void
15211 do_neon_fcmp_absolute (void)
15212 {
15213 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15214 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15215 N_F_16_32 | N_KEY);
15216 /* Size field comes from bit mask. */
15217 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15218 }
15219
/* VACLE/VACLT: inverted absolute compare, via source swap.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15226
15227 static void
15228 do_neon_step (void)
15229 {
15230 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15231 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15232 N_F_16_32 | N_KEY);
15233 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15234 }
15235
/* VABS/VNEG: VFP form first, else two-register Neon encoding.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* F bit for the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15261
15262 static void
15263 do_neon_sli (void)
15264 {
15265 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15266 struct neon_type_el et = neon_check_type (2, rs,
15267 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15268 int imm = inst.operands[2].imm;
15269 constraint (imm < 0 || (unsigned)imm >= et.size,
15270 _("immediate out of range for insert"));
15271 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15272 }
15273
15274 static void
15275 do_neon_sri (void)
15276 {
15277 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15278 struct neon_type_el et = neon_check_type (2, rs,
15279 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15280 int imm = inst.operands[2].imm;
15281 constraint (imm < 1 || (unsigned)imm > et.size,
15282 _("immediate out of range for insert"));
15283 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15284 }
15285
/* VQSHLU: saturating shift left with unsigned result, signed operands.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15302
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* op field: 0b11 for unsigned->unsigned, 0b10 for signed->signed.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Size field describes the destination (half-width) elements.  */
  neon_two_same (0, 1, et.size / 2);
}
15317
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  /* Size field describes the destination (half-width) elements.  */
  neon_two_same (0, 1, et.size / 2);
}
15327
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts are encoded as (size - shift) in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15354
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15384
15385 static void
15386 do_neon_movn (void)
15387 {
15388 struct neon_type_el et = neon_check_type (2, NS_DQ,
15389 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15390 NEON_ENCODE (INTEGER, inst);
15391 neon_two_same (0, 1, et.size / 2);
15392 }
15393
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as (size - shift).  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15418
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant: a distinct encoding with no immediate
	 field; only the element size is written.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15448
15449 /* Check the various types for the VCVT instruction, and return which version
15450 the current instruction is. */
15451
15452 #define CVT_FLAVOUR_VAR \
15453 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15454 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15455 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15456 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15457 /* Half-precision conversions. */ \
15458 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15459 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15460 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
15461 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
15462 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15463 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15464 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
15465 Compared with single/double precision variants, only the co-processor \
15466 field is different, so the encoding flow is reused here. */ \
15467 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
15468 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
15469 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15470 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15471 /* VFP instructions. */ \
15472 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15473 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15474 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15475 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15476 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15477 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15478 /* VFP instructions with bitshift. */ \
15479 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15480 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15481 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15482 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15483 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15484 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15485 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15486 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15487
15488 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15489 neon_cvt_flavour_##C,
15490
15491 /* The different types of conversions we can do. */
15492 enum neon_cvt_flavour
15493 {
15494 CVT_FLAVOUR_VAR
15495 neon_cvt_flavour_invalid,
15496 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
15497 };
15498
15499 #undef CVT_VAR
15500
/* Try each conversion flavour in turn against the operand types of the
   current instruction; return the first that type-checks, or
   neon_cvt_flavour_invalid.  neon_check_type sets inst.error on a
   mismatch, so clear it again once a flavour matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Expands to one CVT_VAR check per entry in the flavour table.  */
  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15526
/* Rounding behaviour requested by the mnemonic: modes a/n/p/m correspond to
   the VCVTA/VCVTN/VCVTP/VCVTM suffixes, z to plain VCVT (round toward zero)
   and x to VCVTR (see the do_neon_cvt* entry points below).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15537
15538 /* Neon-syntax VFP conversions. */
15539
/* Encode a Neon-syntax VCVT as the equivalent legacy VFP mnemonic, looked
   up from the CVT_FLAVOUR_VAR table: the BSN column for forms with an
   immediate bitshift, the CN column otherwise.  A NULL table entry means
   the flavour has no such form, in which case nothing is encoded.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The VFP form takes one register plus the shift immediate; fold
	     the parsed three operands down to two.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15591
/* Encode a round-toward-zero VCVT using the legacy VFP mnemonic from the
   ZN column of CVT_FLAVOUR_VAR.  Flavours with a NULL entry (or none at
   all) are silently skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15608
15609 static void
15610 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15611 enum neon_cvt_mode mode)
15612 {
15613 int sz, op;
15614 int rm;
15615
15616 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15617 D register operands. */
15618 if (flavour == neon_cvt_flavour_s32_f64
15619 || flavour == neon_cvt_flavour_u32_f64)
15620 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15621 _(BAD_FPU));
15622
15623 if (flavour == neon_cvt_flavour_s32_f16
15624 || flavour == neon_cvt_flavour_u32_f16)
15625 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15626 _(BAD_FP16));
15627
15628 set_it_insn_type (OUTSIDE_IT_INSN);
15629
15630 switch (flavour)
15631 {
15632 case neon_cvt_flavour_s32_f64:
15633 sz = 1;
15634 op = 1;
15635 break;
15636 case neon_cvt_flavour_s32_f32:
15637 sz = 0;
15638 op = 1;
15639 break;
15640 case neon_cvt_flavour_s32_f16:
15641 sz = 0;
15642 op = 1;
15643 break;
15644 case neon_cvt_flavour_u32_f64:
15645 sz = 1;
15646 op = 0;
15647 break;
15648 case neon_cvt_flavour_u32_f32:
15649 sz = 0;
15650 op = 0;
15651 break;
15652 case neon_cvt_flavour_u32_f16:
15653 sz = 0;
15654 op = 0;
15655 break;
15656 default:
15657 first_error (_("invalid instruction shape"));
15658 return;
15659 }
15660
15661 switch (mode)
15662 {
15663 case neon_cvt_mode_a: rm = 0; break;
15664 case neon_cvt_mode_n: rm = 1; break;
15665 case neon_cvt_mode_p: rm = 2; break;
15666 case neon_cvt_mode_m: rm = 3; break;
15667 default: first_error (_("invalid rounding mode")); return;
15668 }
15669
15670 NEON_ENCODE (FPV8, inst);
15671 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15672 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15673 inst.instruction |= sz << 8;
15674
15675 /* ARMv8.2 fp16 VCVT instruction. */
15676 if (flavour == neon_cvt_flavour_s32_f16
15677 ||flavour == neon_cvt_flavour_u32_f16)
15678 do_scalar_fp16_v82_encode ();
15679 inst.instruction |= op << 7;
15680 inst.instruction |= rm << 16;
15681 inst.instruction |= 0xf0000000;
15682 inst.is_neon = TRUE;
15683 }
15684
15685 static void
15686 do_neon_cvt_1 (enum neon_cvt_mode mode)
15687 {
15688 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15689 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
15690 NS_FH, NS_HF, NS_FHI, NS_HFI,
15691 NS_NULL);
15692 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15693
15694 if (flavour == neon_cvt_flavour_invalid)
15695 return;
15696
15697 /* PR11109: Handle round-to-zero for VCVT conversions. */
15698 if (mode == neon_cvt_mode_z
15699 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15700 && (flavour == neon_cvt_flavour_s16_f16
15701 || flavour == neon_cvt_flavour_u16_f16
15702 || flavour == neon_cvt_flavour_s32_f32
15703 || flavour == neon_cvt_flavour_u32_f32
15704 || flavour == neon_cvt_flavour_s32_f64
15705 || flavour == neon_cvt_flavour_u32_f64)
15706 && (rs == NS_FD || rs == NS_FF))
15707 {
15708 do_vfp_nsyn_cvtz ();
15709 return;
15710 }
15711
15712 /* ARMv8.2 fp16 VCVT conversions. */
15713 if (mode == neon_cvt_mode_z
15714 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
15715 && (flavour == neon_cvt_flavour_s32_f16
15716 || flavour == neon_cvt_flavour_u32_f16)
15717 && (rs == NS_FH))
15718 {
15719 do_vfp_nsyn_cvtz ();
15720 do_scalar_fp16_v82_encode ();
15721 return;
15722 }
15723
15724 /* VFP rather than Neon conversions. */
15725 if (flavour >= neon_cvt_flavour_first_fp)
15726 {
15727 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15728 do_vfp_nsyn_cvt (rs, flavour);
15729 else
15730 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15731
15732 return;
15733 }
15734
15735 switch (rs)
15736 {
15737 case NS_DDI:
15738 case NS_QQI:
15739 {
15740 unsigned immbits;
15741 unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15742 0x0000100, 0x1000100, 0x0, 0x1000000};
15743
15744 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15745 return;
15746
15747 /* Fixed-point conversion with #0 immediate is encoded as an
15748 integer conversion. */
15749 if (inst.operands[2].present && inst.operands[2].imm == 0)
15750 goto int_encode;
15751 NEON_ENCODE (IMMED, inst);
15752 if (flavour != neon_cvt_flavour_invalid)
15753 inst.instruction |= enctab[flavour];
15754 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15755 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15756 inst.instruction |= LOW4 (inst.operands[1].reg);
15757 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15758 inst.instruction |= neon_quad (rs) << 6;
15759 inst.instruction |= 1 << 21;
15760 if (flavour < neon_cvt_flavour_s16_f16)
15761 {
15762 inst.instruction |= 1 << 21;
15763 immbits = 32 - inst.operands[2].imm;
15764 inst.instruction |= immbits << 16;
15765 }
15766 else
15767 {
15768 inst.instruction |= 3 << 20;
15769 immbits = 16 - inst.operands[2].imm;
15770 inst.instruction |= immbits << 16;
15771 inst.instruction &= ~(1 << 9);
15772 }
15773
15774 neon_dp_fixup (&inst);
15775 }
15776 break;
15777
15778 case NS_DD:
15779 case NS_QQ:
15780 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15781 {
15782 NEON_ENCODE (FLOAT, inst);
15783 set_it_insn_type (OUTSIDE_IT_INSN);
15784
15785 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15786 return;
15787
15788 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15789 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15790 inst.instruction |= LOW4 (inst.operands[1].reg);
15791 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15792 inst.instruction |= neon_quad (rs) << 6;
15793 inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
15794 || flavour == neon_cvt_flavour_u32_f32) << 7;
15795 inst.instruction |= mode << 8;
15796 if (flavour == neon_cvt_flavour_u16_f16
15797 || flavour == neon_cvt_flavour_s16_f16)
15798 /* Mask off the original size bits and reencode them. */
15799 inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
15800
15801 if (thumb_mode)
15802 inst.instruction |= 0xfc000000;
15803 else
15804 inst.instruction |= 0xf0000000;
15805 }
15806 else
15807 {
15808 int_encode:
15809 {
15810 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
15811 0x100, 0x180, 0x0, 0x080};
15812
15813 NEON_ENCODE (INTEGER, inst);
15814
15815 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15816 return;
15817
15818 if (flavour != neon_cvt_flavour_invalid)
15819 inst.instruction |= enctab[flavour];
15820
15821 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15822 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15823 inst.instruction |= LOW4 (inst.operands[1].reg);
15824 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15825 inst.instruction |= neon_quad (rs) << 6;
15826 if (flavour >= neon_cvt_flavour_s16_f16
15827 && flavour <= neon_cvt_flavour_f16_u16)
15828 /* Half precision. */
15829 inst.instruction |= 1 << 18;
15830 else
15831 inst.instruction |= 2 << 18;
15832
15833 neon_dp_fixup (&inst);
15834 }
15835 }
15836 break;
15837
15838 /* Half-precision conversions for Advanced SIMD -- neon. */
15839 case NS_QD:
15840 case NS_DQ:
15841
15842 if ((rs == NS_DQ)
15843 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15844 {
15845 as_bad (_("operand size must match register width"));
15846 break;
15847 }
15848
15849 if ((rs == NS_QD)
15850 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15851 {
15852 as_bad (_("operand size must match register width"));
15853 break;
15854 }
15855
15856 if (rs == NS_DQ)
15857 inst.instruction = 0x3b60600;
15858 else
15859 inst.instruction = 0x3b60700;
15860
15861 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15862 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15863 inst.instruction |= LOW4 (inst.operands[1].reg);
15864 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15865 neon_dp_fixup (&inst);
15866 break;
15867
15868 default:
15869 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15870 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15871 do_vfp_nsyn_cvt (rs, flavour);
15872 else
15873 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15874 }
15875 }
15876
/* VCVTR: round using the current FPSCR rounding mode.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* Plain VCVT: round toward zero.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15912
15913 static void
15914 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15915 {
15916 if (is_double)
15917 mark_feature_used (&fpu_vfp_ext_armv8);
15918
15919 encode_arm_vfp_reg (inst.operands[0].reg,
15920 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15921 encode_arm_vfp_reg (inst.operands[1].reg,
15922 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15923 inst.instruction |= to ? 0x10000 : 0;
15924 inst.instruction |= t ? 0x80 : 0;
15925 inst.instruction |= is_double ? 0x100 : 0;
15926 do_vfp_cond_or_thumb ();
15927 }
15928
/* Shared worker for VCVTB (T == FALSE) and VCVTT (T == TRUE).  Probes the
   four supported type combinations in order; the probe order matters
   because each failing neon_check_type may set inst.error, which the next
   successful match clears.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15970
/* VCVTB: convert using the bottom half of the half-precision register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: convert using the top half of the half-precision register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15983
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode encoding for the
   (possibly 64-bit) immediate, trying the inverted immediate with the
   opposite of the requested VMOV/VMVN sense if the direct form is not
   representable.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate was parsed into two 32-bit halves.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the possibly-flipped sense.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16035
16036 static void
16037 do_neon_mvn (void)
16038 {
16039 if (inst.operands[1].isreg)
16040 {
16041 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16042
16043 NEON_ENCODE (INTEGER, inst);
16044 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16045 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16046 inst.instruction |= LOW4 (inst.operands[1].reg);
16047 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16048 inst.instruction |= neon_quad (rs) << 6;
16049 }
16050 else
16051 {
16052 NEON_ENCODE (IMMED, inst);
16053 neon_move_immediate ();
16054 }
16055
16056 neon_dp_fixup (&inst);
16057 }
16058
16059 /* Encode instructions of form:
16060
16061 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16062 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16063
16064 static void
16065 neon_mixed_length (struct neon_type_el et, unsigned size)
16066 {
16067 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16068 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16069 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16070 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16071 inst.instruction |= LOW4 (inst.operands[2].reg);
16072 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16073 inst.instruction |= (et.type == NT_unsigned) << 24;
16074 inst.instruction |= neon_logbits (size) << 20;
16075
16076 neon_dp_fixup (&inst);
16077 }
16078
/* Lengthening dyadic ops (e.g. VADDL): Qd = Dn op Dm, doubling element
   width in the result.  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16087
/* VABAL: absolute-difference-and-accumulate, long form (Qd, Dn, Dm).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16095
16096 static void
16097 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16098 {
16099 if (inst.operands[2].isscalar)
16100 {
16101 struct neon_type_el et = neon_check_type (3, NS_QDS,
16102 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16103 NEON_ENCODE (SCALAR, inst);
16104 neon_mul_mac (et, et.type == NT_unsigned);
16105 }
16106 else
16107 {
16108 struct neon_type_el et = neon_check_type (3, NS_QDD,
16109 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16110 NEON_ENCODE (INTEGER, inst);
16111 neon_mixed_length (et, et.size);
16112 }
16113 }
16114
/* VMLAL/VMLSL and friends: long MAC with optional scalar operand.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16120
/* Widening dyadic ops (e.g. VADDW): Qd = Qn op Dm.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16128
/* Narrowing dyadic ops (e.g. VADDHN): Dd = Qn op Qm, halving element
   width in the result.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16139
/* VQDMULL and friends: saturating long multiply, signed 16/32-bit only.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16145
/* VMULL: long multiply, with integer, polynomial and scalar variants.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for p64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16177
/* VEXT: extract a vector from a pair of vectors at a byte offset computed
   from the element-sized immediate.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* The encoded immediate is always a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The offset must lie within the (8- or 16-byte) register pair.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16199
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16216
/* VDUP: duplicate either a vector scalar (Dm[x]) or an ARM core register
   across all lanes of a D or Q register.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> Dd/Qd, Dm[x].  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source: VDUP.<size> Dd/Qd, Rm.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16267
16268 /* VMOV has particularly many variations. It can be one of:
16269 0. VMOV<c><q> <Qd>, <Qm>
16270 1. VMOV<c><q> <Dd>, <Dm>
16271 (Register operations, which are VORR with Rm = Rn.)
16272 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16273 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16274 (Immediate loads.)
16275 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16276 (ARM register to scalar.)
16277 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16278 (Two ARM registers to vector.)
16279 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16280 (Scalar to ARM register.)
16281 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16282 (Vector to two ARM registers.)
16283 8. VMOV.F32 <Sd>, <Sm>
16284 9. VMOV.F64 <Dd>, <Dm>
16285 (VFP register moves.)
16286 10. VMOV.F32 <Sd>, #imm
16287 11. VMOV.F64 <Dd>, #imm
16288 (VFP float immediate load.)
16289 12. VMOV <Rd>, <Sm>
16290 (VFP single to ARM reg.)
16291 13. VMOV <Sd>, <Rm>
16292 (ARM reg to VFP single.)
16293 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16294 (Two ARM regs to two VFP singles.)
16295 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16296 (Two VFP singles to two ARM regs.)
16297
16298 These cases can be disambiguated using neon_select_shape, except cases 1/9
16299 and 3/11 which depend on the operand type too.
16300
16301 All the encoded bits are hardcoded by this function.
16302
16303 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16304 Cases 5, 7 may be used with VFPv2 and above.
16305
16306 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16307 can specify a type where it doesn't make sense to, and is ignored). */
16308
/* Encode VMOV in all its variants; see the numbered case list in the
   comment block above.  The shape determined by neon_select_shape picks
   the case, except 1/9 and 3/11 which are further disambiguated by the
   operand type.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* .f64 form is the VFP fcpyd; anything else falls through to
	     the Neon VORR-based register move.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rm == Rn: Rm is encoded in both source fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Non-32-bit transfers need Neon, not just VFP.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Non-32-bit transfers need Neon, not just VFP.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16557
16558 static void
16559 do_neon_rshift_round_imm (void)
16560 {
16561 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16562 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16563 int imm = inst.operands[2].imm;
16564
16565 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16566 if (imm == 0)
16567 {
16568 inst.operands[2].present = 0;
16569 do_neon_mov ();
16570 return;
16571 }
16572
16573 constraint (imm < 1 || (unsigned)imm > et.size,
16574 _("immediate out of range for shift"));
16575 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16576 et.size - imm);
16577 }
16578
16579 static void
16580 do_neon_movhf (void)
16581 {
16582 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16583 constraint (rs != NS_HH, _("invalid suffix"));
16584
16585 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16586 _(BAD_FPU));
16587
16588 do_vfp_sp_monadic ();
16589
16590 inst.is_neon = 1;
16591 inst.instruction |= 0xf0000000;
16592 }
16593
16594 static void
16595 do_neon_movl (void)
16596 {
16597 struct neon_type_el et = neon_check_type (2, NS_QD,
16598 N_EQK | N_DBL, N_SU_32 | N_KEY);
16599 unsigned sizebits = et.size >> 3;
16600 inst.instruction |= sizebits << 19;
16601 neon_two_same (0, et.type == NT_unsigned, -1);
16602 }
16603
16604 static void
16605 do_neon_trn (void)
16606 {
16607 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16608 struct neon_type_el et = neon_check_type (2, rs,
16609 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16610 NEON_ENCODE (INTEGER, inst);
16611 neon_two_same (neon_quad (rs), 1, et.size);
16612 }
16613
16614 static void
16615 do_neon_zip_uzp (void)
16616 {
16617 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16618 struct neon_type_el et = neon_check_type (2, rs,
16619 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16620 if (rs == NS_DD && et.size == 32)
16621 {
16622 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16623 inst.instruction = N_MNEM_vtrn;
16624 do_neon_trn ();
16625 return;
16626 }
16627 neon_two_same (neon_quad (rs), 1, et.size);
16628 }
16629
16630 static void
16631 do_neon_sat_abs_neg (void)
16632 {
16633 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16634 struct neon_type_el et = neon_check_type (2, rs,
16635 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16636 neon_two_same (neon_quad (rs), 1, et.size);
16637 }
16638
16639 static void
16640 do_neon_pair_long (void)
16641 {
16642 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16643 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16644 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16645 inst.instruction |= (et.type == NT_unsigned) << 7;
16646 neon_two_same (neon_quad (rs), 1, et.size);
16647 }
16648
16649 static void
16650 do_neon_recip_est (void)
16651 {
16652 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16653 struct neon_type_el et = neon_check_type (2, rs,
16654 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16655 inst.instruction |= (et.type == NT_float) << 8;
16656 neon_two_same (neon_quad (rs), 1, et.size);
16657 }
16658
16659 static void
16660 do_neon_cls (void)
16661 {
16662 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16663 struct neon_type_el et = neon_check_type (2, rs,
16664 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16665 neon_two_same (neon_quad (rs), 1, et.size);
16666 }
16667
16668 static void
16669 do_neon_clz (void)
16670 {
16671 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16672 struct neon_type_el et = neon_check_type (2, rs,
16673 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16674 neon_two_same (neon_quad (rs), 1, et.size);
16675 }
16676
16677 static void
16678 do_neon_cnt (void)
16679 {
16680 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16681 struct neon_type_el et = neon_check_type (2, rs,
16682 N_EQK | N_INT, N_8 | N_KEY);
16683 neon_two_same (neon_quad (rs), 1, et.size);
16684 }
16685
16686 static void
16687 do_neon_swp (void)
16688 {
16689 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16690 neon_two_same (neon_quad (rs), 1, -1);
16691 }
16692
/* Encode VTBL/VTBX table lookups.  Operand 1 is a list of 1..4 D
   registers forming the table; the list length (minus one) is packed
   into bits [9:8].  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded minus one.  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16716
/* Encode VLDM/VSTM (and the DB variants).  Delegates to the
   single-precision VFP encoder when the register list is S registers.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer list.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16746
/* Encode VLDR/VSTR via the legacy FLDS/FSTS/FLDD/FSTD VFP mnemonics.
   Bit 20 (the L bit) of the initial bitmask distinguishes load from
   store.  */
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16783
16784 /* "interleave" version also handles non-interleaving register VLD1/VST1
16785 instructions. */
16786
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment qualifier (in bits) lives in the top of operand 1's
     immediate; translate it to the 2-bit encoded "align" field, checking
     which alignments each list length permits.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16852
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGNMENT is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */
16857
16858 static int
16859 neon_alignment_bit (int size, int align, int *do_alignment, ...)
16860 {
16861 va_list ap;
16862 int result = FAIL, thissize, thisalign;
16863
16864 if (!inst.operands[1].immisalign)
16865 {
16866 *do_alignment = 0;
16867 return SUCCESS;
16868 }
16869
16870 va_start (ap, do_alignment);
16871
16872 do
16873 {
16874 thissize = va_arg (ap, int);
16875 if (thissize == -1)
16876 break;
16877 thisalign = va_arg (ap, int);
16878
16879 if (size == thissize && align == thisalign)
16880 result = SUCCESS;
16881 }
16882 while (result != SUCCESS);
16883
16884 va_end (ap);
16885
16886 if (result == SUCCESS)
16887 *do_alignment = 1;
16888 else
16889 first_error (_("unsupported alignment for instruction"));
16890
16891 return result;
16892 }
16893
/* Encode single-lane VLD<n>/VST<n> ("to/from one lane" forms).
   <n> (minus one) comes from bits [9:8] of the initial bitmask.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment qualifier (in bits) from the top of operand 1's immediate.  */
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a D register for this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> permits a different set of (size, align) pairs and encodes
     the chosen alignment differently in bits starting at 4.  */
  switch (n)
    {
    case 0: /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16978
16979 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16980
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> (minus one) comes from bits [9:8] of the initial bitmask; each
     variant has its own legal alignments and list-length rules.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with :128 alignment use a special size field.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
17053
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17056
17057 static void
17058 do_neon_ldx_stx (void)
17059 {
17060 if (inst.operands[1].isreg)
17061 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17062
17063 switch (NEON_LANE (inst.operands[0].imm))
17064 {
17065 case NEON_INTERLEAVE_LANES:
17066 NEON_ENCODE (INTERLV, inst);
17067 do_neon_ld_st_interleave ();
17068 break;
17069
17070 case NEON_ALL_LANES:
17071 NEON_ENCODE (DUP, inst);
17072 if (inst.instruction == N_INV)
17073 {
17074 first_error ("only loads support such operands");
17075 break;
17076 }
17077 do_neon_ld_dup ();
17078 break;
17079
17080 default:
17081 NEON_ENCODE (LANE, inst);
17082 do_neon_ld_st_lane ();
17083 }
17084
17085 /* L bit comes from bit mask. */
17086 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17087 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17088 inst.instruction |= inst.operands[1].reg << 16;
17089
17090 if (inst.operands[1].postind)
17091 {
17092 int postreg = inst.operands[1].imm & 0xf;
17093 constraint (!inst.operands[1].immisreg,
17094 _("post-index must be a register"));
17095 constraint (postreg == 0xd || postreg == 0xf,
17096 _("bad register for post-index"));
17097 inst.instruction |= postreg;
17098 }
17099 else
17100 {
17101 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17102 constraint (inst.reloc.exp.X_op != O_constant
17103 || inst.reloc.exp.X_add_number != 0,
17104 BAD_ADDR_MODE);
17105
17106 if (inst.operands[1].writeback)
17107 {
17108 inst.instruction |= 0xd;
17109 }
17110 else
17111 inst.instruction |= 0xf;
17112 }
17113
17114 if (thumb_mode)
17115 inst.instruction |= 0xf9000000;
17116 else
17117 inst.instruction |= 0xf4000000;
17118 }
17119
17120 /* FP v8. */
/* Common VFP encoder for the FP v8 scalar instructions; RS selects
   between the half-, single- and double-precision register shapes.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 marks the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* These encodings are unconditional (0xf condition field).  */
  inst.instruction |= 0xf0000000;
}
17148
17149 static void
17150 do_vsel (void)
17151 {
17152 set_it_insn_type (OUTSIDE_IT_INSN);
17153
17154 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17155 first_error (_("invalid instruction shape"));
17156 }
17157
17158 static void
17159 do_vmaxnm (void)
17160 {
17161 set_it_insn_type (OUTSIDE_IT_INSN);
17162
17163 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17164 return;
17165
17166 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17167 return;
17168
17169 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17170 }
17171
/* Common encoder for the VRINT family; MODE selects the rounding
   variant.  Scalar (VFP) shapes are tried first, then the Neon vector
   encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m variants are unconditional, so forbid IT blocks.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Fold the rounding-mode-specific opcode bits in.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks the double-precision variant.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits starting at 7 for the Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17263
/* Entry points for the VRINT{X,Z,R,A,N,P,M} mnemonics; each selects
   the corresponding rounding mode for do_vrint_1 (semantics per the
   ARM ARM rounding-mode definitions).  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17305
/* Validate and encode the scalar operand of VCMLA for element size
   ELSIZE; returns the register/index packed for the instruction, or
   reports an error and returns 0 when out of range.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 32)
    {
      /* 32-bit elements: only index 0 is encodable.  */
      if (idx == 0)
	return reg;
    }
  else if (elsize == 16)
    {
      /* 16-bit elements: index 0/1, register 0..15; index in bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }

  first_error (_("scalar out of range"));
  return 0;
}
17320
/* Encode VCMLA (ARMv8.3 complex multiply-accumulate); the rotation
   (0/90/180/270) is supplied as an immediate expression.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as 0..3.  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-register form; rotation and size land in different bits
	 than the by-scalar encoding.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17361
/* Encode VCADD (ARMv8.3 complex add); only rotations of 90 and 270
   are architecturally valid.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  /* Bit 24 selects the 270-degree rotation; bit 20 the 32-bit size.  */
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17379
17380 /* Crypto v1 instructions. */
/* Common encoder for the two-operand crypto instructions (AES, SHA1H,
   SHA*SU0/1).  ELTTYPE is the required element type; OP goes into
   bits [7:6], or is omitted when -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Unconditional encoding; the prefix differs between Thumb and ARM.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17405
/* Common encoder for the three-operand crypto instructions (SHA1*,
   SHA256*).  U and OP select the particular instruction.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17420
/* Entry points for the individual crypto mnemonics; each passes its
   element type / opcode selector to the common 2-op or 3-op encoder.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17504
/* Common encoder for the CRC32 instructions.  POLY selects the
   Castagnoli (CRC32C) polynomial, SZ the data size (0 = byte,
   1 = halfword, 2 = word).  Field positions differ between the ARM
   and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC is unpredictable everywhere; SP additionally so in Thumb.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17524
/* Entry points for the CRC32{,C}{B,H,W} mnemonics; each selects the
   polynomial and data size for do_crc32_1.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17560
/* Encode VJCVT (f64 -> s32 conversion; requires ARMv8 VFP).  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17570
17571 \f
17572 /* Overall per-instruction processing. */
17573
17574 /* We need to be able to fix up arbitrary expressions in some statements.
17575 This is so that we can handle symbols that are an arbitrary distance from
17576 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17577 which returns part of an address in a form which will be valid for
17578 a data instruction. We do this by pushing the expression into a symbol
17579 in the expr_section, and creating a fix for that. */
17580
static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long) exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: push the expression into a symbol of
	 its own and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17634
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Decompose the relocation expression into the symbol + offset pair
     that frag_var can carry.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17666
17667 /* Write a 32-bit thumb instruction to buf. */
17668 static void
17669 put_thumb32_insn (char * buf, unsigned long insn)
17670 {
17671 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17672 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17673 }
17674
/* Emit the instruction currently held in `inst' to the output.  STR is
   the source line, used only for diagnostics.  Handles delayed errors,
   relaxable instructions, fix creation and debug-line emission.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  /* Report any error that a parse or encode routine recorded.  */
  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions go into a variant frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Some pseudo-instructions emit nothing.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Two-word ARM pseudo: the same word is emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Attach any pending relocation to the bytes just emitted.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17721
17722 static char *
17723 output_it_inst (int cond, int mask, char * to)
17724 {
17725 unsigned long instruction = 0xbf00;
17726
17727 mask &= 0xf;
17728 instruction |= mask;
17729 instruction |= cond << 4;
17730
17731 if (to == NULL)
17732 {
17733 to = frag_more (2);
17734 #ifdef OBJ_ELF
17735 dwarf2_emit_insn (2);
17736 #endif
17737 }
17738
17739 md_number_to_chars (to, instruction, 2);
17740
17741 return to;
17742 }
17743
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a mnemonic accepts a condition affix; see opcode_lookup.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17777
17778 /* Subroutine of md_assemble, responsible for looking up the primary
17779 opcode from the mnemonic the user wrote. STR points to the
17780 beginning of the mnemonic.
17781
17782 This is not simply a hash table lookup, because of conditional
17783 variants. Most instructions have conditional variants, which are
17784 expressed with a _conditional affix_ to the mnemonic. If we were
17785 to encode each conditional variant as a literal string in the opcode
17786 table, it would have approximately 20,000 entries.
17787
17788 Most mnemonics take this affix as a suffix, and in unified syntax,
17789 'most' is upgraded to 'all'. However, in the divided syntax, some
17790 instructions take the affix as an infix, notably the s-variants of
17791 the arithmetic instructions. Of those instructions, all but six
17792 have the infix appear after the third character of the mnemonic.
17793
17794 Accordingly, the algorithm for looking up primary opcodes given
17795 an identifier is:
17796
17797 1. Look up the identifier in the opcode table.
17798 If we find a match, go to step U.
17799
17800 2. Look up the last two characters of the identifier in the
17801 conditions table. If we find a match, look up the first N-2
17802 characters of the identifier in the opcode table. If we
17803 find a match, go to step CE.
17804
17805 3. Look up the fourth and fifth characters of the identifier in
17806 the conditions table. If we find a match, extract those
17807 characters from the identifier, and look up the remaining
17808 characters in the opcode table. If we find a match, go
17809 to step CM.
17810
17811 4. Fail.
17812
17813 U. Examine the tag field of the opcode structure, in case this is
17814 one of the six instructions with its conditional infix in an
17815 unusual place. If it is, the tag tells us where to find the
17816 infix; look it up in the conditions table and set inst.cond
17817 accordingly. Otherwise, this is an unconditional instruction.
17818 Again set inst.cond accordingly. Return the opcode structure.
17819
17820 CE. Examine the tag field to make sure this is an instruction that
17821 should receive a conditional suffix. If it is not, fail.
17822 Otherwise, set inst.cond from the suffix we already looked up,
17823 and return the opcode structure.
17824
17825 CM. Examine the tag field to make sure this is an instruction that
17826 should receive a conditional infix after the third character.
17827 If it is not, fail. Otherwise, undo the edits to the current
17828 line of input and proceed as for case CE. */
17829
17830 static const struct asm_opcode *
17831 opcode_lookup (char **str)
17832 {
17833 char *end, *base;
17834 char *affix;
17835 const struct asm_opcode *opcode;
17836 const struct asm_cond *cond;
17837 char save[2];
17838
17839 /* Scan up to the end of the mnemonic, which must end in white space,
17840 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17841 for (base = end = *str; *end != '\0'; end++)
17842 if (*end == ' ' || *end == '.')
17843 break;
17844
17845 if (end == base)
17846 return NULL;
17847
17848 /* Handle a possible width suffix and/or Neon type suffix. */
17849 if (end[0] == '.')
17850 {
17851 int offset = 2;
17852
17853 /* The .w and .n suffixes are only valid if the unified syntax is in
17854 use. */
17855 if (unified_syntax && end[1] == 'w')
17856 inst.size_req = 4;
17857 else if (unified_syntax && end[1] == 'n')
17858 inst.size_req = 2;
17859 else
17860 offset = 0;
17861
17862 inst.vectype.elems = 0;
17863
17864 *str = end + offset;
17865
17866 if (end[offset] == '.')
17867 {
17868 /* See if we have a Neon type suffix (possible in either unified or
17869 non-unified ARM syntax mode). */
17870 if (parse_neon_type (&inst.vectype, str) == FAIL)
17871 return NULL;
17872 }
17873 else if (end[offset] != '\0' && end[offset] != ' ')
17874 return NULL;
17875 }
17876 else
17877 *str = end;
17878
17879 /* Look for unaffixed or special-case affixed mnemonic. */
17880 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17881 end - base);
17882 if (opcode)
17883 {
17884 /* step U */
17885 if (opcode->tag < OT_odd_infix_0)
17886 {
17887 inst.cond = COND_ALWAYS;
17888 return opcode;
17889 }
17890
17891 if (warn_on_deprecated && unified_syntax)
17892 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17893 affix = base + (opcode->tag - OT_odd_infix_0);
17894 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17895 gas_assert (cond);
17896
17897 inst.cond = cond->value;
17898 return opcode;
17899 }
17900
17901 /* Cannot have a conditional suffix on a mnemonic of less than two
17902 characters. */
17903 if (end - base < 3)
17904 return NULL;
17905
17906 /* Look for suffixed mnemonic. */
17907 affix = end - 2;
17908 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17909 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17910 affix - base);
17911 if (opcode && cond)
17912 {
17913 /* step CE */
17914 switch (opcode->tag)
17915 {
17916 case OT_cinfix3_legacy:
17917 /* Ignore conditional suffixes matched on infix only mnemonics. */
17918 break;
17919
17920 case OT_cinfix3:
17921 case OT_cinfix3_deprecated:
17922 case OT_odd_infix_unc:
17923 if (!unified_syntax)
17924 return 0;
17925 /* Fall through. */
17926
17927 case OT_csuffix:
17928 case OT_csuffixF:
17929 case OT_csuf_or_in3:
17930 inst.cond = cond->value;
17931 return opcode;
17932
17933 case OT_unconditional:
17934 case OT_unconditionalF:
17935 if (thumb_mode)
17936 inst.cond = cond->value;
17937 else
17938 {
17939 /* Delayed diagnostic. */
17940 inst.error = BAD_COND;
17941 inst.cond = COND_ALWAYS;
17942 }
17943 return opcode;
17944
17945 default:
17946 return NULL;
17947 }
17948 }
17949
17950 /* Cannot have a usual-position infix on a mnemonic of less than
17951 six characters (five would be a suffix). */
17952 if (end - base < 6)
17953 return NULL;
17954
17955 /* Look for infixed mnemonic in the usual position. */
17956 affix = base + 3;
17957 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17958 if (!cond)
17959 return NULL;
17960
17961 memcpy (save, affix, 2);
17962 memmove (affix, affix + 2, (end - affix) - 2);
17963 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17964 (end - base) - 2);
17965 memmove (affix + 2, affix, (end - affix) - 2);
17966 memcpy (affix, save, 2);
17967
17968 if (opcode
17969 && (opcode->tag == OT_cinfix3
17970 || opcode->tag == OT_cinfix3_deprecated
17971 || opcode->tag == OT_csuf_or_in3
17972 || opcode->tag == OT_cinfix3_legacy))
17973 {
17974 /* Step CM. */
17975 if (warn_on_deprecated && unified_syntax
17976 && (opcode->tag == OT_cinfix3
17977 || opcode->tag == OT_cinfix3_deprecated))
17978 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17979
17980 inst.cond = cond->value;
17981 return opcode;
17982 }
17983
17984 return NULL;
17985 }
17986
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions. Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* 0x18 is the mask for a one-instruction block; the IT insn emitted
     below is re-emitted in place as the mask grows.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT insn was written so it can be patched.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18007
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  /* A mask of 0x10 is treated as "last insn seen" by
     it_fsm_post_encode, which then leaves the IT block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18017
18018 /* Update the mask of the current automatically-generated IT
18019 instruction. See comments in new_automatic_it_block (). */
18020
18021 static void
18022 now_it_add_mask (int cond)
18023 {
18024 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18025 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18026 | ((bitvalue) << (nbit)))
18027 const int resulting_bit = (cond & 1);
18028
18029 now_it.mask &= 0xf;
18030 now_it.mask = SET_BIT_VALUE (now_it.mask,
18031 resulting_bit,
18032 (5 - now_it.block_length));
18033 now_it.mask = SET_BIT_VALUE (now_it.mask,
18034 1,
18035 ((5 - now_it.block_length) - 1) );
18036 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18037
18038 #undef CLEAR_BIT
18039 #undef SET_BIT_VALUE
18040 }
18041
/* The IT blocks handling machinery is accessed through these functions:
18043 it_fsm_pre_encode () from md_assemble ()
18044 set_it_insn_type () optional, from the tencode functions
18045 set_it_insn_type_last () ditto
18046 in_it_block () ditto
18047 it_fsm_post_encode () from md_assemble ()
18048 force_automatic_it_block_close () from label handling functions
18049
18050 Rationale:
18051 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18052 initializing the IT insn type with a generic initial value depending
18053 on the inst.condition.
18054 2) During the tencode function, two things may happen:
18055 a) The tencode function overrides the IT insn type by
18056 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18057 b) The tencode function queries the IT block state by
18058 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18059
18060 Both set_it_insn_type and in_it_block run the internal FSM state
18061 handling function (handle_it_state), because: a) setting the IT insn
18062 type may incur in an invalid state (exiting the function),
18063 and b) querying the state requires the FSM to be updated.
18064 Specifically we want to avoid creating an IT block for conditional
18065 branches, so it_fsm_pre_encode is actually a guess and we can't
18066 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18068 Because of this, if set_it_insn_type and in_it_block have to be used,
18069 set_it_insn_type has to be called first.
18070
18071 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18072 determines the insn IT type depending on the inst.cond code.
18073 When a tencode () routine encodes an instruction that can be
18074 either outside an IT block, or, in the case of being inside, has to be
18075 the last one, set_it_insn_type_last () will determine the proper
18076 IT instruction type based on the inst.cond code. Otherwise,
18077 set_it_insn_type can be called for overriding that logic or
18078 for covering other cases.
18079
18080 Calling handle_it_state () may not transition the IT block state to
18081 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18082 still queried. Instead, if the FSM determines that the state should
18083 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18084 after the tencode () function: that's what it_fsm_post_encode () does.
18085
18086 Since in_it_block () calls the state handling function to get an
18087 updated state, an error may occur (due to invalid insns combination).
18088 In that case, inst.error is set.
18089 Therefore, inst.error has to be checked after the execution of
18090 the tencode () routine.
18091
18092 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18093 any pending state change (if any) that didn't take place in
18094 handle_it_state () as explained above. */
18095
18096 static void
18097 it_fsm_pre_encode (void)
18098 {
18099 if (inst.cond != COND_ALWAYS)
18100 inst.it_insn_type = INSIDE_IT_INSN;
18101 else
18102 inst.it_insn_type = OUTSIDE_IT_INSN;
18103
18104 now_it.state_handled = 0;
18105 }
18106
/* IT state FSM handling function.  Run once per instruction (guarded by
   now_it.state_handled); returns SUCCESS or FAIL, setting inst.error on
   failure.  See the large comment above for the overall design.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* Conditional ARM instruction: only warn when implicit
		 IT for ARM was not requested.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and implicit
		     IT generation is off: hard error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and unless
		 this insn must be last, open a fresh block for it.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  The condition this slot demands
	   is derived from the block condition and the current mask bit.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18270
/* A pattern/mask pair identifying a class of 16-bit Thumb encodings,
   with a human-readable description for diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits that must match after masking.  */
  unsigned long mask;		/* Which bits of the encoding to test.  */
  const char* description;	/* Printed in the deprecation warning.  */
};
18277
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode; terminated by a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18292
/* Commit any pending IT-block state change after an instruction has
   been encoded, and issue the ARMv8 IT-block deprecation warnings.
   Called from md_assemble () after the encode routine.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Warn (once per block) about constructs deprecated inside IT blocks
     on ARMv8.  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* Scan the table of deprecated 16-bit encodings.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of 0x10 means the last slot of the block was just used;
     leave the IT block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18346
18347 static void
18348 force_automatic_it_block_close (void)
18349 {
18350 if (now_it.state == AUTOMATIC_IT_BLOCK)
18351 {
18352 close_automatic_it_block ();
18353 now_it.state = OUTSIDE_IT_BLOCK;
18354 now_it.mask = 0;
18355 }
18356 }
18357
/* Return non-zero if the current instruction is inside an IT block
   (manual or automatic).  Runs the state FSM first if it has not yet
   been run for this instruction, so the answer is up to date.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
18366
18367 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18368 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18369 here, hence the "known" in the function name. */
18370
18371 static bfd_boolean
18372 known_t32_only_insn (const struct asm_opcode *opcode)
18373 {
18374 /* Original Thumb-1 wide instruction. */
18375 if (opcode->tencode == do_t_blx
18376 || opcode->tencode == do_t_branch23
18377 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18378 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18379 return TRUE;
18380
18381 /* Wide-only instruction added to ARMv8-M Baseline. */
18382 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18383 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18384 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18385 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18386 return TRUE;
18387
18388 return FALSE;
18389 }
18390
/* Whether wide instruction variant can be used if available for a valid OPCODE
   in ARCH.  Returns TRUE when a 32-bit encoding of OPCODE may be
   selected under the architecture ARCH.  */

static bfd_boolean
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  /* Wide-only opcodes are trivially OK.  */
  if (known_t32_only_insn (opcode))
    return TRUE;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return TRUE;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return TRUE;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return TRUE;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return FALSE;
}
18423
/* Assemble one instruction from the source line STR.  Looks up the
   opcode, validates it against the selected CPU/mode, parses the
   operands, runs the Thumb or ARM encode routine with IT-FSM
   bookkeeping, records the architecture features used, and finally
   emits the instruction via output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	     instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings must not fall in the 0xe800-0xffff range
	     reserved for the first halfword of 32-bit encodings.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18613
/* Warn about any IT block left open at the end of assembly.  For ELF,
   every section's per-segment IT state is checked; otherwise only the
   global state.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18632
18633 /* Various frobbings of labels and their addresses. */
18634
/* Hook run at the start of each input line: forget the label we saw on
   the previous line so md_assemble () only aligns labels defined on the
   current one.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18640
/* Hook run when a label SYM is defined: record it, tag it with the
   current Thumb/interwork state, close any automatic IT block, and
   optionally mark it as a Thumb function entry point.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18699
18700 bfd_boolean
18701 arm_data_in_code (void)
18702 {
18703 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18704 {
18705 *input_line_pointer = '/';
18706 input_line_pointer += 5;
18707 *input_line_pointer = 0;
18708 return TRUE;
18709 }
18710
18711 return FALSE;
18712 }
18713
18714 char *
18715 arm_canonicalize_symbol_name (char * name)
18716 {
18717 int len;
18718
18719 if (thumb_mode && (len = strlen (name)) > 5
18720 && streq (name + len - 5, "/data"))
18721 *(name + len - 5) = 0;
18722
18723 return name;
18724 }
18725 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF expands to one reg_entry initializer: the name (stringised),
   the encoded register number, the register type, the built-in flag
   (TRUE), and a zero trailing field.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2: the name carries index n but the encoded number is 2*n —
   used below for Neon Q registers, which alias pairs of D registers.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET: entries 0-15 for prefix p; REGSETH: entries 16-31, for the
   32-entry VFP/Neon register files.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* SPLRBANK: the LR/SP/SPSR triple for one banked processor mode, in
   both cases.  The 768 flag and the <<16 bank index form the banked
   register encoding consumed by the operand parser; SPSR_BIT marks
   the SPSR variant.  (Encoding layout per the MRS/MSR banked-register
   handling elsewhere in this file — not visible in this view.)  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The 512 flag plus the <<16 index is the
     banked-register encoding (analogous to SPLRBANK's 768 entries).  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
18874
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each subset of the
   f/s/x/c field letters is listed explicitly so a simple string
   lookup suffices; the value is the OR of the per-field mask bits,
   so all orderings of the same letters map to the same mask.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18953
/* Table of V7M psr names.  The value is the special-register number
   placed in the MRS/MSR encoding; the *_ns entries (0x80 bit set)
   look like the non-secure banked views — presumably for the v8-M
   Security Extensions; confirm against the architecture manual.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
18984
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl"; "rrx" takes no shift amount.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18995
/* Table of all explicit relocation names (the "name" part of
   expressions like "ldr r0, =sym(GOT)"), mapping each to its BFD
   relocation code.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL},  { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
19020
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  "hs" is an alias of "cs", and "ul"/"lo" are aliases of "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19040
/* UL_BARRIER emits a lower- and an upper-case entry for one barrier
   option: name, 4-bit option code, and the minimum architecture
   feature required to use it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Option names for DMB/DSB/ISB barriers.  The "ld" (load-only)
   variants require ARMv8; "sh"/"un" are older spellings of
   "ish"/"nsh".  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19066
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* xCM_ builds one entry of a conditionally-infixed mnemonic: m1 is the
   stem before the condition, m2 the condition itself, m3 the tail.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* CM expands to the bare mnemonic plus one entry per condition code
   (including the hs/ul/lo aliases).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder for entries with no encoding function.  */
#define do_0 0
19243
19244 static const struct asm_opcode insns[] =
19245 {
19246 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19247 #define THUMB_VARIANT & arm_ext_v4t
19248 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19249 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19250 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19251 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19252 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19253 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19254 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19255 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19256 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19257 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19258 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19259 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19260 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19261 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19262 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19263 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19264
19265 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19266 for setting PSR flag bits. They are obsolete in V6 and do not
19267 have Thumb equivalents. */
19268 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19269 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19270 CL("tstp", 110f000, 2, (RR, SH), cmp),
19271 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19272 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19273 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19274 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19275 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19276 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19277
19278 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19279 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19280 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19281 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19282
19283 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19284 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19285 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19286 OP_RRnpc),
19287 OP_ADDRGLDR),ldst, t_ldst),
19288 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19289
19290 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19291 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19292 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19293 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19294 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19295 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19296
19297 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19298 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19299 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19300 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19301
19302 /* Pseudo ops. */
19303 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19304 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19305 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19306 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19307
19308 /* Thumb-compatibility pseudo ops. */
19309 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19310 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19311 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19312 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19313 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19314 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19315 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19316 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19317 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19318 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19319 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19320 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19321
19322 /* These may simplify to neg. */
19323 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19324 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19325
19326 #undef THUMB_VARIANT
19327 #define THUMB_VARIANT & arm_ext_v6
19328
19329 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19330
19331 /* V1 instructions with no Thumb analogue prior to V6T2. */
19332 #undef THUMB_VARIANT
19333 #define THUMB_VARIANT & arm_ext_v6t2
19334
19335 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19336 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19337 CL("teqp", 130f000, 2, (RR, SH), cmp),
19338
19339 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19340 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19341 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19342 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19343
19344 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19345 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19346
19347 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19348 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19349
19350 /* V1 instructions with no Thumb analogue at all. */
19351 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19352 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19353
19354 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19355 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19356 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19357 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19358 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19359 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19360 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19361 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19362
19363 #undef ARM_VARIANT
19364 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19365 #undef THUMB_VARIANT
19366 #define THUMB_VARIANT & arm_ext_v4t
19367
19368 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19369 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19370
19371 #undef THUMB_VARIANT
19372 #define THUMB_VARIANT & arm_ext_v6t2
19373
19374 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19375 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19376
19377 /* Generic coprocessor instructions. */
19378 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19379 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19380 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19381 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19382 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19383 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19384 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19385
19386 #undef ARM_VARIANT
19387 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19388
19389 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19390 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19391
19392 #undef ARM_VARIANT
19393 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19394 #undef THUMB_VARIANT
19395 #define THUMB_VARIANT & arm_ext_msr
19396
19397 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19398 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19399
19400 #undef ARM_VARIANT
19401 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19402 #undef THUMB_VARIANT
19403 #define THUMB_VARIANT & arm_ext_v6t2
19404
19405 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19406 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19407 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19408 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19409 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19410 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19411 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19412 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19413
19414 #undef ARM_VARIANT
19415 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19416 #undef THUMB_VARIANT
19417 #define THUMB_VARIANT & arm_ext_v4t
19418
19419 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19420 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19421 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19422 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19423 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19424 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19425
19426 #undef ARM_VARIANT
19427 #define ARM_VARIANT & arm_ext_v4t_5
19428
19429 /* ARM Architecture 4T. */
19430 /* Note: bx (and blx) are required on V5, even if the processor does
19431 not support Thumb. */
19432 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19433
19434 #undef ARM_VARIANT
19435 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19436 #undef THUMB_VARIANT
19437 #define THUMB_VARIANT & arm_ext_v5t
19438
19439 /* Note: blx has 2 variants; the .value coded here is for
19440 BLX(2). Only this variant has conditional execution. */
19441 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19442 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19443
19444 #undef THUMB_VARIANT
19445 #define THUMB_VARIANT & arm_ext_v6t2
19446
19447 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19448 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19449 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19450 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19451 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19452 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19453 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19454 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19455
19456 #undef ARM_VARIANT
19457 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19458 #undef THUMB_VARIANT
19459 #define THUMB_VARIANT & arm_ext_v5exp
19460
19461 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19462 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19463 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19464 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19465
19466 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19467 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19468
19469 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19470 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19471 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19472 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19473
19474 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19475 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19476 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19477 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19478
19479 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19480 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19481
19482 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19483 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19484 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19485 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19486
19487 #undef ARM_VARIANT
19488 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19489 #undef THUMB_VARIANT
19490 #define THUMB_VARIANT & arm_ext_v6t2
19491
19492 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19493 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19494 ldrd, t_ldstd),
19495 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19496 ADDRGLDRS), ldrd, t_ldstd),
19497
19498 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19499 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19500
19501 #undef ARM_VARIANT
19502 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19503
19504 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19505
19506 #undef ARM_VARIANT
19507 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19508 #undef THUMB_VARIANT
19509 #define THUMB_VARIANT & arm_ext_v6
19510
19511 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19512 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19513 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19514 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19515 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19516 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19517 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19518 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19519 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19520 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19521
19522 #undef THUMB_VARIANT
19523 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19524
19525 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19526 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19527 strex, t_strex),
19528 #undef THUMB_VARIANT
19529 #define THUMB_VARIANT & arm_ext_v6t2
19530
19531 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19532 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19533
19534 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19535 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19536
19537 /* ARM V6 not included in V7M. */
19538 #undef THUMB_VARIANT
19539 #define THUMB_VARIANT & arm_ext_v6_notm
19540 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19541 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19542 UF(rfeib, 9900a00, 1, (RRw), rfe),
19543 UF(rfeda, 8100a00, 1, (RRw), rfe),
19544 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19545 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19546 UF(rfefa, 8100a00, 1, (RRw), rfe),
19547 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19548 UF(rfeed, 9900a00, 1, (RRw), rfe),
19549 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19550 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19551 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19552 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19553 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19554 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19555 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19556 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19557 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19558 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19559
19560 /* ARM V6 not included in V7M (eg. integer SIMD). */
19561 #undef THUMB_VARIANT
19562 #define THUMB_VARIANT & arm_ext_v6_dsp
19563 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19564 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19565 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19566 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19567 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19568 /* Old name for QASX. */
19569 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19570 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19571 /* Old name for QSAX. */
19572 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19573 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19574 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19575 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19576 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19577 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19578 /* Old name for SASX. */
19579 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19580 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19581 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19582 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19583 /* Old name for SHASX. */
19584 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19585 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19586 /* Old name for SHSAX. */
19587 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19588 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19589 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19590 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19591 /* Old name for SSAX. */
19592 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19593 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19594 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19595 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19596 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19597 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19598 /* Old name for UASX. */
19599 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19600 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19601 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19602 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19603 /* Old name for UHASX. */
19604 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19605 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19606 /* Old name for UHSAX. */
19607 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19608 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19609 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19610 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19611 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19612 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19613 /* Old name for UQASX. */
19614 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19615 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19616 /* Old name for UQSAX. */
19617 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19618 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19619 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19620 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19621 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19622 /* Old name for USAX. */
19623 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19624 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19625 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19626 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19627 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19628 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19629 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19630 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19631 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19632 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19633 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19634 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19635 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19636 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19637 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19638 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19639 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19640 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19641 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19642 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19643 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19644 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19645 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19646 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19647 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19648 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19649 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19650 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19651 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19652 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19653 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19654 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19655 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19656 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19657
19658 #undef ARM_VARIANT
19659 #define ARM_VARIANT & arm_ext_v6k
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_v6k
19662
19663 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19664 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19665 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19666 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19667
19668 #undef THUMB_VARIANT
19669 #define THUMB_VARIANT & arm_ext_v6_notm
19670 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19671 ldrexd, t_ldrexd),
19672 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19673 RRnpcb), strexd, t_strexd),
19674
19675 #undef THUMB_VARIANT
19676 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19677 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19678 rd_rn, rd_rn),
19679 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19680 rd_rn, rd_rn),
19681 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19682 strex, t_strexbh),
19683 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19684 strex, t_strexbh),
19685 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19686
19687 #undef ARM_VARIANT
19688 #define ARM_VARIANT & arm_ext_sec
19689 #undef THUMB_VARIANT
19690 #define THUMB_VARIANT & arm_ext_sec
19691
19692 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19693
19694 #undef ARM_VARIANT
19695 #define ARM_VARIANT & arm_ext_virt
19696 #undef THUMB_VARIANT
19697 #define THUMB_VARIANT & arm_ext_virt
19698
19699 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19700 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19701
19702 #undef ARM_VARIANT
19703 #define ARM_VARIANT & arm_ext_pan
19704 #undef THUMB_VARIANT
19705 #define THUMB_VARIANT & arm_ext_pan
19706
19707 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19708
19709 #undef ARM_VARIANT
19710 #define ARM_VARIANT & arm_ext_v6t2
19711 #undef THUMB_VARIANT
19712 #define THUMB_VARIANT & arm_ext_v6t2
19713
19714 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19715 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19716 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19717 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19718
19719 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19720 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19721
19722 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19723 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19724 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19725 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19726
19727 #undef THUMB_VARIANT
19728 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19729 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19730 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19731
19732 /* Thumb-only instructions. */
19733 #undef ARM_VARIANT
19734 #define ARM_VARIANT NULL
19735 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19736 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19737
19738 /* ARM does not really have an IT instruction, so always allow it.
19739 The opcode is copied from Thumb in order to allow warnings in
19740 -mimplicit-it=[never | arm] modes. */
19741 #undef ARM_VARIANT
19742 #define ARM_VARIANT & arm_ext_v1
19743 #undef THUMB_VARIANT
19744 #define THUMB_VARIANT & arm_ext_v6t2
19745
19746 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19747 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19748 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19749 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19750 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19751 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19752 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19753 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19754 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19755 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19756 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19757 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19758 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19759 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19760 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19761 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19762 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19763 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19764
19765 /* Thumb2 only instructions. */
19766 #undef ARM_VARIANT
19767 #define ARM_VARIANT NULL
19768
19769 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19770 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19771 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19772 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19773 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19774 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19775
19776 /* Hardware division instructions. */
19777 #undef ARM_VARIANT
19778 #define ARM_VARIANT & arm_ext_adiv
19779 #undef THUMB_VARIANT
19780 #define THUMB_VARIANT & arm_ext_div
19781
19782 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19783 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19784
19785 /* ARM V6M/V7 instructions. */
19786 #undef ARM_VARIANT
19787 #define ARM_VARIANT & arm_ext_barrier
19788 #undef THUMB_VARIANT
19789 #define THUMB_VARIANT & arm_ext_barrier
19790
19791 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19792 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19793 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19794
19795 /* ARM V7 instructions. */
19796 #undef ARM_VARIANT
19797 #define ARM_VARIANT & arm_ext_v7
19798 #undef THUMB_VARIANT
19799 #define THUMB_VARIANT & arm_ext_v7
19800
19801 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19802 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19803
19804 #undef ARM_VARIANT
19805 #define ARM_VARIANT & arm_ext_mp
19806 #undef THUMB_VARIANT
19807 #define THUMB_VARIANT & arm_ext_mp
19808
19809 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19810
19811 /* AArchv8 instructions. */
19812 #undef ARM_VARIANT
19813 #define ARM_VARIANT & arm_ext_v8
19814
19815 /* Instructions shared between armv8-a and armv8-m. */
19816 #undef THUMB_VARIANT
19817 #define THUMB_VARIANT & arm_ext_atomics
19818
19819 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19820 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19821 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19822 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19823 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19824 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19825 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19826 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19827 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19828 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19829 stlex, t_stlex),
19830 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19831 stlex, t_stlex),
19832 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19833 stlex, t_stlex),
19834 #undef THUMB_VARIANT
19835 #define THUMB_VARIANT & arm_ext_v8
19836
19837 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19838 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19839 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19840 ldrexd, t_ldrexd),
19841 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19842 strexd, t_strexd),
19843 /* ARMv8 T32 only. */
19844 #undef ARM_VARIANT
19845 #define ARM_VARIANT NULL
19846 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19847 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19848 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19849
19850 /* FP for ARMv8. */
19851 #undef ARM_VARIANT
19852 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19853 #undef THUMB_VARIANT
19854 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19855
19856 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19857 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19858 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19859 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19860 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19861 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19862 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19863 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19864 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19865 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19866 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19867 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19868 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19869 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19870 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19871 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19872 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19873
19874 /* Crypto v1 extensions. */
19875 #undef ARM_VARIANT
19876 #define ARM_VARIANT & fpu_crypto_ext_armv8
19877 #undef THUMB_VARIANT
19878 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19879
19880 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19881 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19882 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19883 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19884 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19885 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19886 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19887 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19888 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19889 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19890 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19891 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19892 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19893 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19894
19895 #undef ARM_VARIANT
19896 #define ARM_VARIANT & crc_ext_armv8
19897 #undef THUMB_VARIANT
19898 #define THUMB_VARIANT & crc_ext_armv8
19899 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19900 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19901 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19902 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19903 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19904 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19905
19906 /* ARMv8.2 RAS extension. */
19907 #undef ARM_VARIANT
19908 #define ARM_VARIANT & arm_ext_ras
19909 #undef THUMB_VARIANT
19910 #define THUMB_VARIANT & arm_ext_ras
19911 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19912
19913 #undef ARM_VARIANT
19914 #define ARM_VARIANT & arm_ext_v8_3
19915 #undef THUMB_VARIANT
19916 #define THUMB_VARIANT & arm_ext_v8_3
19917 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
19918 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
19919 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
19920
19921 #undef ARM_VARIANT
19922 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19923 #undef THUMB_VARIANT
19924 #define THUMB_VARIANT NULL
19925
19926 cCE("wfs", e200110, 1, (RR), rd),
19927 cCE("rfs", e300110, 1, (RR), rd),
19928 cCE("wfc", e400110, 1, (RR), rd),
19929 cCE("rfc", e500110, 1, (RR), rd),
19930
19931 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19932 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19933 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19934 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19935
19936 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19937 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19938 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19939 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19940
19941 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19942 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19943 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19944 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19945 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19946 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19947 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19948 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19949 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19950 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19951 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19952 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19953
19954 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19955 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19956 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19957 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19958 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19959 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19960 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19961 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19962 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19963 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19964 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19965 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19966
19967 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19968 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19969 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19970 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19971 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19972 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19973 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19974 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19975 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19976 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19977 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19978 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19979
19980 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19981 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19982 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19983 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19984 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19985 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19986 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19987 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19988 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19989 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19990 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19991 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19992
19993 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19994 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19995 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19996 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19997 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19998 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19999 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20000 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20001 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20002 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20003 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20004 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20005
20006 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20007 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20008 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20009 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20010 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20011 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20012 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20013 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20014 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20015 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20016 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20017 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20018
20019 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20020 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20021 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20022 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20023 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20024 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20025 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20026 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20027 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20028 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20029 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20030 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20031
20032 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20033 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20034 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20035 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20036 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20037 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20038 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20039 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20040 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20041 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20042 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20043 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20044
20045 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20046 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20047 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20048 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20049 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20050 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20051 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20052 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20053 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20054 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20055 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20056 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20057
20058 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20059 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20060 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20061 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20062 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20063 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20064 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20065 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20066 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20067 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20068 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20069 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20070
20071 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20072 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20073 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20074 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20075 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20076 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20077 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20078 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20079 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20080 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20081 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20082 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20083
20084 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20085 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20086 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20087 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20088 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20089 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20090 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20091 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20092 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20093 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20094 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20095 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20096
20097 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20098 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20099 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20100 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20101 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20102 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20103 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20104 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20105 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20106 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20107 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20108 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20109
20110 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20111 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20112 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20113 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20114 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20115 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20116 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20117 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20118 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20119 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20120 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20121 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20122
20123 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20124 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20125 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20126 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20127 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20128 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20129 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20130 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20131 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20132 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20133 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20134 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20135
20136 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20137 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20138 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20139 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20140 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20141 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20142 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20143 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20144 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20145 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20146 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20147 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20148
20149 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20150 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20152 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20153 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20154 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20155 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20156 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20157 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20158 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20159 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20160 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20161
20162 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20163 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20164 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20165 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20166 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20167 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20168 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20169 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20170 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20171 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20172 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20173 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20174
20175 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20176 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20177 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20178 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20179 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20180 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20181 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20182 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20183 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20184 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20185 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20186 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20187
20188 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20189 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20190 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20191 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20192 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20193 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20194 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20195 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20196 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20197 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20198 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20199 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20200
20201 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20202 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20203 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20204 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20205 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20206 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20207 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20208 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20209 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20210 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20211 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20212 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20213
20214 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20215 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20216 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20217 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20218 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20219 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20220 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20221 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20222 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20223 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20224 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20225 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20226
20227 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20228 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20229 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20230 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20231 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20232 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20233 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20234 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20235 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20236 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20237 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20238 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20239
20240 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20241 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20242 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20243 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20244 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20245 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20246 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20247 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20248 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20249 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20250 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20251 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20252
20253 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20254 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20255 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20256 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20257 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20258 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20259 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20260 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20261 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20262 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20263 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20264 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20265
20266 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20267 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20268 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20269 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20270 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20271 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20272 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20273 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20274 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20275 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20276 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20277 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20278
20279 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20280 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20281 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20282 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20283 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20284 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20285 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20286 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20287 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20288 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20289 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20290 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20291
20292 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20293 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20294 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20295 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20296 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20297 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20298 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20299 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20300 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20301 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20302 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20303 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20304
20305 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20306 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20307 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20308 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20309 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20310 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20311 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20312 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20313 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20314 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20315 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20316 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20317
20318 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20319 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20320 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20321 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20322
20323 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20324 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20325 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20326 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20327 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20328 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20329 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20330 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20331 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20332 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20333 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20334 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20335
20336 /* The implementation of the FIX instruction is broken on some
20337 assemblers, in that it accepts a precision specifier as well as a
20338 rounding specifier, despite the fact that this is meaningless.
20339 To be more compatible, we accept it as well, though of course it
20340 does not set any bits. */
20341 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20342 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20343 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20344 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20345 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20346 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20347 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20348 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20349 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20350 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20351 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20352 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20353 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20354
20355 /* Instructions that were new with the real FPA, call them V2. */
20356 #undef ARM_VARIANT
20357 #define ARM_VARIANT & fpu_fpa_ext_v2
20358
20359 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20360 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20361 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20362 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20363 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20364 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20365
20366 #undef ARM_VARIANT
20367 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20368
20369 /* Moves and type conversions. */
20370 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20371 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20372 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20373 cCE("fmstat", ef1fa10, 0, (), noargs),
20374 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20375 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20376 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20377 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20378 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20379 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20380 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20381 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20382 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20383 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20384
20385 /* Memory operations. */
20386 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20387 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20388 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20389 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20390 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20391 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20392 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20393 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20394 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20395 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20396 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20397 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20398 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20399 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20400 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20401 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20402 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20403 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20404
20405 /* Monadic operations. */
20406 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20407 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20408 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20409
20410 /* Dyadic operations. */
20411 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20412 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20413 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20414 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20415 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20416 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20417 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20418 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20419 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20420
20421 /* Comparisons. */
20422 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20423 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20424 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20425 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20426
20427 /* Double precision load/store are still present on single precision
20428 implementations. */
20429 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20430 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20431 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20432 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20433 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20434 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20435 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20436 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20437 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20438 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20439
20440 #undef ARM_VARIANT
20441 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20442
20443 /* Moves and type conversions. */
20444 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20445 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20446 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20447 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20448 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20449 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20450 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20451 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20452 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20453 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20454 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20455 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20456 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20457
20458 /* Monadic operations. */
20459 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20460 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20461 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20462
20463 /* Dyadic operations. */
20464 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20465 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20466 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20467 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20468 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20469 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20470 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20471 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20472 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20473
20474 /* Comparisons. */
20475 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20476 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20477 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20478 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20479
20480 #undef ARM_VARIANT
20481 #define ARM_VARIANT & fpu_vfp_ext_v2
20482
20483 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20484 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20485 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20486 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20487
20488 /* Instructions which may belong to either the Neon or VFP instruction sets.
20489 Individual encoder functions perform additional architecture checks. */
20490 #undef ARM_VARIANT
20491 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20492 #undef THUMB_VARIANT
20493 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20494
20495 /* These mnemonics are unique to VFP. */
20496 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20497 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20498 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20499 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20500 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20501 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20502 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20503 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20504 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20505 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20506
20507 /* Mnemonics shared by Neon and VFP. */
20508 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20509 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20510 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20511
20512 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20513 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20514
20515 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20516 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20517
20518 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20519 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20520 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20521 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20522 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20523 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20524 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20525 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20526
20527 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20528 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20529 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20530 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20531
20532
20533 /* NOTE: All VMOV encoding is special-cased! */
20534 NCE(vmov, 0, 1, (VMOV), neon_mov),
20535 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20536
20537 #undef ARM_VARIANT
20538 #define ARM_VARIANT & arm_ext_fp16
20539 #undef THUMB_VARIANT
20540 #define THUMB_VARIANT & arm_ext_fp16
20541 /* New instructions added from v8.2, allowing the extraction and insertion of
20542 the upper 16 bits of a 32-bit vector register. */
20543 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20544 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20545
20546 #undef THUMB_VARIANT
20547 #define THUMB_VARIANT & fpu_neon_ext_v1
20548 #undef ARM_VARIANT
20549 #define ARM_VARIANT & fpu_neon_ext_v1
20550
20551 /* Data processing with three registers of the same length. */
20552 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20553 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20554 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20555 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20556 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20557 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20558 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20559 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20560 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20561 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20562 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20563 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20564 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20565 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20566 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20567 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20568 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20569 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20570 /* If not immediate, fall back to neon_dyadic_i64_su.
20571 shl_imm should accept I8 I16 I32 I64,
20572 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20573 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20574 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20575 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20576 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20577 /* Logic ops, types optional & ignored. */
20578 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20579 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20580 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20581 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20582 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20583 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20584 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20585 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20586 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20587 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20588 /* Bitfield ops, untyped. */
20589 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20590 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20591 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20592 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20593 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20594 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20595 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20596 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20597 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20598 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20599 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20600 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20601 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20602 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20603 back to neon_dyadic_if_su. */
20604 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20605 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20606 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20607 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20608 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20609 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20610 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20611 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20612 /* Comparison. Type I8 I16 I32 F32. */
20613 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20614 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20615 /* As above, D registers only. */
20616 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20617 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20618 /* Int and float variants, signedness unimportant. */
20619 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20620 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20621 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20622 /* Add/sub take types I8 I16 I32 I64 F32. */
20623 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20624 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20625 /* vtst takes sizes 8, 16, 32. */
20626 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20627 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20628 /* VMUL takes I8 I16 I32 F32 P8. */
20629 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20630 /* VQD{R}MULH takes S16 S32. */
20631 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20632 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20633 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20634 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20635 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20636 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20637 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20638 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20639 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20640 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20641 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20642 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20643 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20644 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20645 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20646 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20647 /* ARM v8.1 extension. */
20648 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20649 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20650 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20651 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20652
20653 /* Two address, int/float. Types S8 S16 S32 F32. */
20654 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20655 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20656
20657 /* Data processing with two registers and a shift amount. */
20658 /* Right shifts, and variants with rounding.
20659 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20660 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20661 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20662 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20663 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20664 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20665 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20666 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20667 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20668 /* Shift and insert. Sizes accepted 8 16 32 64. */
20669 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20670 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20671 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20672 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20673 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20674 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20675 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20676 /* Right shift immediate, saturating & narrowing, with rounding variants.
20677 Types accepted S16 S32 S64 U16 U32 U64. */
20678 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20679 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20680 /* As above, unsigned. Types accepted S16 S32 S64. */
20681 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20682 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20683 /* Right shift narrowing. Types accepted I16 I32 I64. */
20684 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20685 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20686 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20687 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20688 /* CVT with optional immediate for fixed-point variant. */
20689 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20690
20691 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20692 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20693
20694 /* Data processing, three registers of different lengths. */
20695 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20696 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20697 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20698 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20699 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20700 /* If not scalar, fall back to neon_dyadic_long.
20701 Vector types as above, scalar types S16 S32 U16 U32. */
20702 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20703 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20704 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20705 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20706 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20707 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20708 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20709 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20710 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20711 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20712 /* Saturating doubling multiplies. Types S16 S32. */
20713 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20714 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20715 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20716 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20717 S16 S32 U16 U32. */
20718 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20719
20720 /* Extract. Size 8. */
20721 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20722 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20723
20724 /* Two registers, miscellaneous. */
20725 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20726 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20727 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20728 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20729 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20730 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20731 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20732 /* Vector replicate. Sizes 8 16 32. */
20733 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20734 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20735 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20736 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20737 /* VMOVN. Types I16 I32 I64. */
20738 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20739 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20740 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20741 /* VQMOVUN. Types S16 S32 S64. */
20742 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20743 /* VZIP / VUZP. Sizes 8 16 32. */
20744 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20745 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20746 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20747 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20748 /* VQABS / VQNEG. Types S8 S16 S32. */
20749 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20750 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20751 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20752 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20753 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20754 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20755 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20756 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20757 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20758 /* Reciprocal estimates. Types U32 F16 F32. */
20759 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20760 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20761 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20762 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20763 /* VCLS. Types S8 S16 S32. */
20764 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20765 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20766 /* VCLZ. Types I8 I16 I32. */
20767 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20768 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20769 /* VCNT. Size 8. */
20770 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20771 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20772 /* Two address, untyped. */
20773 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20774 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20775 /* VTRN. Sizes 8 16 32. */
20776 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20777 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20778
20779 /* Table lookup. Size 8. */
20780 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20781 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20782
20783 #undef THUMB_VARIANT
20784 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20785 #undef ARM_VARIANT
20786 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20787
20788 /* Neon element/structure load/store. */
20789 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20790 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20791 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20792 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20793 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20794 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20795 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20796 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20797
20798 #undef THUMB_VARIANT
20799 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20800 #undef ARM_VARIANT
20801 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20802 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20803 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20804 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20805 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20806 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20807 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20808 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20809 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20810 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20811
20812 #undef THUMB_VARIANT
20813 #define THUMB_VARIANT & fpu_vfp_ext_v3
20814 #undef ARM_VARIANT
20815 #define ARM_VARIANT & fpu_vfp_ext_v3
20816
20817 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20818 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20819 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20820 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20821 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20822 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20823 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20824 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20825 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20826
20827 #undef ARM_VARIANT
20828 #define ARM_VARIANT & fpu_vfp_ext_fma
20829 #undef THUMB_VARIANT
20830 #define THUMB_VARIANT & fpu_vfp_ext_fma
20831 /* Mnemonics shared by Neon and VFP. These are included in the
20832 VFP FMA variant; NEON and VFP FMA always includes the NEON
20833 FMA instructions. */
20834 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20835 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20836 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20837 the v form should always be used. */
20838 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20839 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20840 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20841 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20842 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20843 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20844
20845 #undef THUMB_VARIANT
20846 #undef ARM_VARIANT
20847 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20848
20849 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20850 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20851 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20852 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20853 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20854 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20855 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20856 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20857
20858 #undef ARM_VARIANT
20859 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20860
20861 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20862 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20863 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20864 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20865 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20866 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20867 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20868 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20869 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20870 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20871 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20872 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20873 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20874 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20875 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20876 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20877 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20878 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20879 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20880 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20881 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20882 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20883 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20884 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20885 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20886 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20887 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20888 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20889 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20890 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20891 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20892 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20893 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20894 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20895 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20896 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20897 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20898 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20899 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20900 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20901 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20902 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20903 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20904 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20905 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20906 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20907 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20908 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20909 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20910 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20911 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20912 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20913 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20914 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20915 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20916 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20917 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20918 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20919 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20920 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20921 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20922 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20923 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20924 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20925 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20926 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20927 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20928 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20929 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20930 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20931 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20932 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20933 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20934 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20935 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20936 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20937 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20938 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20939 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20940 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20941 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20942 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20943 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20944 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20945 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20946 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20947 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20948 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20949 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20950 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20951 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20952 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20953 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20954 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20955 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20956 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20957 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20958 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20959 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20960 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20961 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20962 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20963 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20964 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20965 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20966 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20967 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20968 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20969 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20970 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20971 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20972 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20973 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20974 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20975 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20976 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20977 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20978 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20979 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20980 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20981 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20982 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20983 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20984 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20985 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20986 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20987 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20988 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20989 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20990 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20991 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20992 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20993 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20994 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20995 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20996 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20997 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20998 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20999 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21000 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21001 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21002 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21003 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21004 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21005 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21006 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21007 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21008 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21009 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21010 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21011 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21012 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21013 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21014 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21015 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21016 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21017 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21018 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21019 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21020 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21021 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21022 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21023
21024 #undef ARM_VARIANT
21025 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21026
21027 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21028 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21029 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21030 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21031 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21032 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21033 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21034 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21035 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21036 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21037 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21038 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21039 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21040 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21041 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21042 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21043 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21044 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21045 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21046 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21047 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21048 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21049 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21050 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21051 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21052 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21053 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21054 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21055 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21056 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21057 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21058 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21059 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21060 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21061 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21062 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21063 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21064 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21065 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21066 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21067 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21068 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21069 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21070 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21071 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21072 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21073 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21074 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21075 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21076 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21077 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21078 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21079 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21080 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21081 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21082 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21083 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21084
21085 #undef ARM_VARIANT
21086 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21087
21088 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21089 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21090 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21091 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21092 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21093 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21094 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21095 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21096 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21097 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21098 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21099 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21100 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21101 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21102 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21103 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21104 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21105 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21106 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21107 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21108 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21109 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21110 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21111 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21112 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21113 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21114 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21115 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21116 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21117 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21118 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21119 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21120 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21121 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21122 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21123 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21124 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21125 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21126 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21127 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21128 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21129 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21130 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21131 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21132 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21133 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21134 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21135 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21136 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21137 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21138 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21139 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21140 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21141 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21142 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21143 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21144 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21145 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21146 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21147 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21148 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21149 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21150 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21151 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21152 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21153 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21154 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21155 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21156 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21157 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21158 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21159 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21160 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21161 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21162 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21163 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21164
21165 /* ARMv8-M instructions. */
21166 #undef ARM_VARIANT
21167 #define ARM_VARIANT NULL
21168 #undef THUMB_VARIANT
21169 #define THUMB_VARIANT & arm_ext_v8m
21170 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21171 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21172 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21173 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21174 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21175 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21176 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21177
21178 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21179 instructions behave as nop if no VFP is present. */
21180 #undef THUMB_VARIANT
21181 #define THUMB_VARIANT & arm_ext_v8m_main
21182 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21183 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21184 };
21185 #undef ARM_VARIANT
21186 #undef THUMB_VARIANT
21187 #undef TCE
21188 #undef TUE
21189 #undef TUF
21190 #undef TCC
21191 #undef cCE
21192 #undef cCL
21193 #undef C3E
21194 #undef CE
21195 #undef CM
21196 #undef UE
21197 #undef UF
21198 #undef UT
21199 #undef NUF
21200 #undef nUF
21201 #undef NCE
21202 #undef nCE
21203 #undef OPS0
21204 #undef OPS1
21205 #undef OPS2
21206 #undef OPS3
21207 #undef OPS4
21208 #undef OPS5
21209 #undef OPS6
21210 #undef do_0
21211 \f
21212 /* MD interface: bits in the object file. */
21213
21214 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21215 for use in the a.out file, and stores them in the array pointed to by buf.
21216 This knows about the endian-ness of the target machine and does
21217 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21218 2 (short) and 4 (long) Floating numbers are put out as a series of
21219 LITTLENUMS (shorts, here at least). */
21220
21221 void
21222 md_number_to_chars (char * buf, valueT val, int n)
21223 {
21224 if (target_big_endian)
21225 number_to_chars_bigendian (buf, val, n);
21226 else
21227 number_to_chars_littleendian (buf, val, n);
21228 }
21229
21230 static valueT
21231 md_chars_to_number (char * buf, int n)
21232 {
21233 valueT result = 0;
21234 unsigned char * where = (unsigned char *) buf;
21235
21236 if (target_big_endian)
21237 {
21238 while (n--)
21239 {
21240 result <<= 8;
21241 result |= (*where++ & 255);
21242 }
21243 }
21244 else
21245 {
21246 while (n--)
21247 {
21248 result <<= 8;
21249 result |= (where[n] & 255);
21250 }
21251 }
21252
21253 return result;
21254 }
21255
21256 /* MD interface: Sections. */
21257
21258 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21259 that an rs_machine_dependent frag may reach. */
21260
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* INSN_SIZE (4) is thus a safe upper bound on how much the frag's
     variable part can grow.  */
  return INSN_SIZE;
}
21277
21278 /* Estimate the size of a frag before relaxing. Assume everything fits in
21279 2 bytes. */
21280
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start with the optimistic 2-byte (narrow Thumb) size; relaxation
     grows the frag to 4 bytes where the narrow encoding cannot reach.  */
  fragp->fr_var = 2;
  return 2;
}
21288
21289 /* Convert a machine dependent frag. */
21290
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* BUF points at the 16-bit placeholder instruction emitted when the
     relaxable insn was first assembled.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the final fixup will resolve.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* fr_var == 4 means relaxation selected the 32-bit encoding.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant register fields from the narrow placeholder into
	     the wide encoding.  The SP/PC-relative forms (top nibble 4
	     or 9) keep their register in bits 8-10; the others split Rd
	     and Rn across bits 0-2 and 3-5.  NOTE(review): field layout
	     inferred from the shifts here, not re-checked against the
	     architecture manual.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of these, only the synthesized PC-relative load is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* NOTE(review): presumably pre-biases the addend for the
	     narrow form's PC offset — confirm against the
	     BFD_RELOC_ARM_THUMB_ADD fixup handling.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put the destination in bits 8-10 of the wide insn;
	     cmp/cmn place the compared register higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd and Rn come from the two low nibbles of the placeholder.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (set-flags) distinguishes the add/sub-with-flags
	     immediate form from the plain T32 immediate form.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21462
21463 /* Return the size of a relaxable immediate operand instruction.
21464 SHIFT and SIZE specify the form of the allowable immediate. */
21465 static int
21466 relax_immediate (fragS *fragp, int size, int shift)
21467 {
21468 offsetT offset;
21469 offsetT mask;
21470 offsetT low;
21471
21472 /* ??? Should be able to do better than this. */
21473 if (fragp->fr_symbol)
21474 return 4;
21475
21476 low = (1 << shift) - 1;
21477 mask = (1 << (shift + size)) - (1 << shift);
21478 offset = fragp->fr_offset;
21479 /* Force misaligned offsets to 32-bit variant. */
21480 if (offset & low)
21481 return 4;
21482 if (offset & ~mask)
21483 return 4;
21484 return 2;
21485 }
21486
21487 /* Get the address of a symbol during relaxation. */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch toward zero to a multiple of the
		 alignment (fr_offset is the alignment power), since an
		 intervening alignment frag absorbs the remainder.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed: nothing of the stretch survives.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (possibly reduced) stretch if the symbol's frag
	 actually lies ahead of us.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21536
21537 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21538 load. */
21539 static int
21540 relax_adr (fragS *fragp, asection *sec, long stretch)
21541 {
21542 addressT addr;
21543 offsetT val;
21544
21545 /* Assume worst case for symbols not known to be in the same section. */
21546 if (fragp->fr_symbol == NULL
21547 || !S_IS_DEFINED (fragp->fr_symbol)
21548 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21549 || S_IS_WEAK (fragp->fr_symbol))
21550 return 4;
21551
21552 val = relaxed_symbol_addr (fragp, stretch);
21553 addr = fragp->fr_address + fragp->fr_fix;
21554 addr = (addr + 4) & ~3;
21555 /* Force misaligned targets to 32-bit variant. */
21556 if (val & 3)
21557 return 4;
21558 val -= addr;
21559 if (val < 0 || val > 1020)
21560 return 4;
21561 return 2;
21562 }
21563
21564 /* Return the size of a relaxable add/sub immediate instruction. */
21565 static int
21566 relax_addsub (fragS *fragp, asection *sec)
21567 {
21568 char *buf;
21569 int op;
21570
21571 buf = fragp->fr_literal + fragp->fr_fix;
21572 op = bfd_get_16(sec->owner, buf);
21573 if ((op & 0xf) == ((op >> 4) & 0xf))
21574 return relax_immediate (fragp, 8, 0);
21575 else
21576 return relax_immediate (fragp, 3, 0);
21577 }
21578
21579 /* Return TRUE iff the definition of symbol S could be pre-empted
21580 (overridden) at link or load time. */
21581 static bfd_boolean
21582 symbol_preemptible (symbolS *s)
21583 {
21584 /* Weak symbols can always be pre-empted. */
21585 if (S_IS_WEAK (s))
21586 return TRUE;
21587
21588 /* Non-global symbols cannot be pre-empted. */
21589 if (! S_IS_EXTERNAL (s))
21590 return FALSE;
21591
21592 #ifdef OBJ_ELF
21593 /* In ELF, a global symbol can be marked protected, or private. In that
21594 case it can't be pre-empted (other definitions in the same link unit
21595 would violate the ODR). */
21596 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21597 return FALSE;
21598 #endif
21599
21600 /* Other global symbols might be pre-empted. */
21601 return TRUE;
21602 }
21603
21604 /* Return the size of a relaxable branch instruction. BITS is the
21605 size of the offset field in the narrow instruction. */
21606
21607 static int
21608 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21609 {
21610 addressT addr;
21611 offsetT val;
21612 offsetT limit;
21613
21614 /* Assume worst case for symbols not known to be in the same section. */
21615 if (!S_IS_DEFINED (fragp->fr_symbol)
21616 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21617 || S_IS_WEAK (fragp->fr_symbol))
21618 return 4;
21619
21620 #ifdef OBJ_ELF
21621 /* A branch to a function in ARM state will require interworking. */
21622 if (S_IS_DEFINED (fragp->fr_symbol)
21623 && ARM_IS_FUNC (fragp->fr_symbol))
21624 return 4;
21625 #endif
21626
21627 if (symbol_preemptible (fragp->fr_symbol))
21628 return 4;
21629
21630 val = relaxed_symbol_addr (fragp, stretch);
21631 addr = fragp->fr_address + fragp->fr_fix + 4;
21632 val -= addr;
21633
21634 /* Offset is a signed value *2 */
21635 limit = 1 << bits;
21636 if (val >= limit || val < -limit)
21637 return 4;
21638 return 2;
21639 }
21640
21641
21642 /* Relax a machine dependent frag. This returns the amount by which
21643 the current size of the frag should change. */
21644
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the relaxable mnemonic recorded in fr_subtype.  The
     immediate widths/shifts below match the corresponding narrow
     Thumb encodings.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      /* Synthesized PC-relative load: same reachability rules as adr.  */
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte-scaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit unscaled immediate.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit signed halfword offset.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit signed halfword offset.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* add/sub sp, #imm only has a 7-bit word-scaled immediate.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21720
21721 /* Round up a section size to the appropriate boundary. */
21722
21723 valueT
21724 md_section_align (segT segment ATTRIBUTE_UNUSED,
21725 valueT size)
21726 {
21727 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21728 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21729 {
21730 /* For a.out, force the section size to be aligned. If we don't do
21731 this, BFD will align it for us, but it will not write out the
21732 final bytes of the section. This may be a bug in BFD, but it is
21733 easier to fix it here since that is how the other a.out targets
21734 work. */
21735 int align;
21736
21737 align = bfd_get_section_alignment (stdoutput, segment);
21738 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21739 }
21740 #endif
21741
21742 return size;
21743 }
21744
21745 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21746 of an rs_align_code fragment. */
21747
void
arm_handle_align (fragS * fragP)
{
  /* Candidate no-op encodings, indexed by [has-v6k][big-endian].
     ARMv1 has no architected NOP, so "mov r0, r0" is used; v6k adds a
     true NOP hint.  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Narrow Thumb no-ops, indexed by [thumb2][big-endian].  Thumb-1 uses
     "mov r8, r8"; Thumb-2 has a real NOP.  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding to fill between this frag and the next.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): masking (rather than clamping) — relies on
     MAX_MEM_FOR_RS_ALIGN_CODE being of the form 2^n - 1.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code: prefer wide NOPs (with one narrow NOP for odd
	 halfword counts) when Thumb-2 is available.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Pad to instruction alignment with zero bytes, marked as data
	 via a mapping symbol on ELF.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest with full-size no-ops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21864
21865 /* Called from md_do_align. Used to create an alignment
21866 frag in a code section. */
21867
21868 void
21869 arm_frag_align_code (int n, int max)
21870 {
21871 char * p;
21872
21873 /* We assume that there will never be a requirement
21874 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21875 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21876 {
21877 char err_msg[128];
21878
21879 sprintf (err_msg,
21880 _("alignments greater than %d bytes not supported in .text sections."),
21881 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21882 as_fatal ("%s", err_msg);
21883 }
21884
21885 p = frag_var (rs_align_code,
21886 MAX_MEM_FOR_RS_ALIGN_CODE,
21887 1,
21888 (relax_substateT) max,
21889 (symbolS *) NULL,
21890 (offsetT) n,
21891 (char *) NULL);
21892 *p = 0;
21893 }
21894
21895 /* Perform target specific initialisation of a frag.
21896 Note - despite the name this initialisation is not done when the frag
21897 is created, but only when its type is assigned. A frag can be created
21898 and used a long time before its type is set, so beware of assuming that
21899 this initialisation is performed first. */
21900
21901 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, and mark
     the recording as valid with MODE_RECORDED.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21908
21909 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  int frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Strip the MODE_RECORDED flag, leaving just the ARM/Thumb bit.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21938
21939 /* When we change sections we need to issue a new mapping symbol. */
21940
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
21949
21950 int
21951 arm_elf_section_type (const char * str, size_t len)
21952 {
21953 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21954 return SHT_ARM_EXIDX;
21955
21956 return -1;
21957 }
21958 \f
21959 /* Code to deal with unwinding tables. */
21960
21961 static void add_unwind_adjustsp (offsetT);
21962
21963 /* Generate any deferred unwind frame offset. */
21964
21965 static void
21966 flush_pending_unwind (void)
21967 {
21968 offsetT offset;
21969
21970 offset = unwind.pending_offset;
21971 unwind.pending_offset = 0;
21972 if (offset != 0)
21973 add_unwind_adjustsp (offset);
21974 }
21975
21976 /* Add an opcode to this list for this function. Two-byte opcodes should
21977 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21978 order. */
21979
21980 static void
21981 add_unwind_opcode (valueT op, int length)
21982 {
21983 /* Add any deferred stack adjustment. */
21984 if (unwind.pending_offset)
21985 flush_pending_unwind ();
21986
21987 unwind.sp_restored = 0;
21988
21989 if (unwind.opcode_count + length > unwind.opcode_alloc)
21990 {
21991 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21992 if (unwind.opcodes)
21993 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
21994 unwind.opcode_alloc);
21995 else
21996 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
21997 }
21998 while (length > 0)
21999 {
22000 length--;
22001 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22002 op >>= 8;
22003 unwind.opcode_count++;
22004 }
22005 }
22006
22007 /* Add unwind opcodes to adjust the stack pointer. */
22008
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one explicit zero byte.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Continuation bit on every byte except the last.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      /* Emit uleb128 bytes last-first, then the 0xb2 opcode, so that
	 the reversed list reads 0xb2 followed by the uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit as many maximal 0x7f decrements as
	 needed, then one final short decrement opcode (0x40 range).  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22068
22069 /* Finish the list of unwind opcodes for this function. */
22070 static void
22071 finish_unwind_opcodes (void)
22072 {
22073 valueT op;
22074
22075 if (unwind.fp_used)
22076 {
22077 /* Adjust sp as necessary. */
22078 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22079 flush_pending_unwind ();
22080
22081 /* After restoring sp from the frame pointer. */
22082 op = 0x90 | unwind.fp_reg;
22083 add_unwind_opcode (op, 1);
22084 }
22085 else
22086 flush_pending_unwind ();
22087 }
22088
22089
22090 /* Start an exception table entry. If idx is nonzero this is an index table
22091 entry. */
22092
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables (.ARM.exidx*) get the EXIDX section type; unwind info
     tables (.ARM.extab*) are plain PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* Derive the unwind section name from the text section name; plain
     ".text" maps to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22157
22158
22159 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22160 personality routine data. Returns zero, or the index table value for
22161 an inline entry. */
22162
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 marks a .cantunwind frame.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 holds at most three opcodes inline; fall back to
	     routine 1 for longer sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack the (at most three) opcodes MSB-first, reversing
		 the list while doing so.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Convert SIZE from bytes to words, rounding up; the word count must
     fit in the table entry's one-byte length field.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Word-align and remember where the entry starts.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22327
22328
22329 /* Initialize the DWARF-2 unwind information for this procedure. */
22330
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22336 #endif /* OBJ_ELF */
22337
22338 /* Convert REGNAME to a DWARF-2 register number. */
22339
22340 int
22341 tc_arm_regname_to_dw2regnum (char *regname)
22342 {
22343 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22344 if (reg != FAIL)
22345 return reg;
22346
22347 /* PR 16694: Allow VFP registers as well. */
22348 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22349 if (reg != FAIL)
22350 return 64 + reg;
22351
22352 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22353 if (reg != FAIL)
22354 return reg + 256;
22355
22356 return -1;
22357 }
22358
22359 #ifdef TE_PE
22360 void
22361 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22362 {
22363 expressionS exp;
22364
22365 exp.X_op = O_secrel;
22366 exp.X_add_symbol = symbol;
22367 exp.X_add_number = 0;
22368 emit_expr (&exp, size);
22369 }
22370 #endif
22371
22372 /* MD interface: Symbol and relocation handling. */
22373
22374 /* Return the address within the segment that a PC-relative fixup is
22375 relative to. For ARM, PC-relative fixups applied to instructions
22376 are generally relative to the location of the fixup plus 8 bytes.
22377 Thumb branches are offset by 4, and Thumb loads relative to PC
22378 require special handling. */
22379
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* If the target is a defined, non-overridable ARM-state function
	 in the same section (and interworking is available), the fixup
	 is handled locally, so undo the zeroing of BASE above and bias
	 from the fixup's real address.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* As for BRANCH23 above, but the locally-resolvable target is a
	 Thumb function.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      /* Local blx to an ARM function: use the fixup's real address.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* Local bl to a Thumb function: use the fixup's real address.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22501
22502 static bfd_boolean flag_warn_syms = TRUE;
22503
22504 bfd_boolean
22505 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22506 {
22507 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22508 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22509 does mean that the resulting code might be very confusing to the reader.
22510 Also this warning can be triggered if the user omits an operand before
22511 an immediate address, eg:
22512
22513 LDR =foo
22514
22515 GAS treats this as an assignment of the value of the symbol foo to a
22516 symbol LDR, and so (without this code) it will not issue any kind of
22517 warning or error message.
22518
22519 Note - ARM instructions are case-insensitive but the strings in the hash
22520 table are all stored in lower case, so we must first ensure that name is
22521 lower case too. */
22522 if (flag_warn_syms && arm_ops_hsh)
22523 {
22524 char * nbuf = strdup (name);
22525 char * p;
22526
22527 for (p = nbuf; *p; p++)
22528 *p = TOLOWER (*p);
22529 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22530 {
22531 static struct hash_control * already_warned = NULL;
22532
22533 if (already_warned == NULL)
22534 already_warned = hash_new ();
22535 /* Only warn about the symbol once. To keep the code
22536 simple we let hash_insert do the lookup for us. */
22537 if (hash_insert (already_warned, name, NULL) == NULL)
22538 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22539 }
22540 else
22541 free (nbuf);
22542 }
22543
22544 return FALSE;
22545 }
22546
22547 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22548 Otherwise we have no need to default values of symbols. */
22549
22550 symbolS *
22551 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22552 {
22553 #ifdef OBJ_ELF
22554 if (name[0] == '_' && name[1] == 'G'
22555 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22556 {
22557 if (!GOT_symbol)
22558 {
22559 if (symbol_find (name))
22560 as_bad (_("GOT already in the symbol table"));
22561
22562 GOT_symbol = symbol_new (name, undefined_section,
22563 (valueT) 0, & zero_address_frag);
22564 }
22565
22566 return GOT_symbol;
22567 }
22568 #endif
22569
22570 return NULL;
22571 }
22572
22573 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22574 computed as two separate immediate values, added together. We
22575 already know that this value cannot be computed by just one ARM
22576 instruction. */
22577
22578 static unsigned int
22579 validate_immediate_twopart (unsigned int val,
22580 unsigned int * highpart)
22581 {
22582 unsigned int a;
22583 unsigned int i;
22584
22585 for (i = 0; i < 32; i += 2)
22586 if (((a = rotate_left (val, i)) & 0xff) != 0)
22587 {
22588 if (a & 0xff00)
22589 {
22590 if (a & ~ 0xffff)
22591 continue;
22592 * highpart = (a >> 8) | ((i + 24) << 7);
22593 }
22594 else if (a & 0xff0000)
22595 {
22596 if (a & 0xff000000)
22597 continue;
22598 * highpart = (a >> 16) | ((i + 16) << 7);
22599 }
22600 else
22601 {
22602 gas_assert (a & 0xff000000);
22603 * highpart = (a >> 24) | ((i + 8) << 7);
22604 }
22605
22606 return (a & 0xff) | (i << 7);
22607 }
22608
22609 return FAIL;
22610 }
22611
22612 static int
22613 validate_offset_imm (unsigned int val, int hwse)
22614 {
22615 if ((hwse && val > 255) || val > 4095)
22616 return FAIL;
22617 return val;
22618 }
22619
22620 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22621 negative immediate constant by altering the instruction. A bit of
22622 a hack really.
22623 MOV <-> MVN
22624 AND <-> BIC
22625 ADC <-> SBC
22626 by inverting the second operand, and
22627 ADD <-> SUB
22628 CMP <-> CMN
22629 by negating the second operand. */
22630
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate replacement immediates; which one is
     needed depends on the opcode, and either may be FAIL.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  /* Extract the data-processing opcode field.  */
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The required replacement immediate was itself not encodable, so
     the whole substitution fails.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Install the replacement opcode in the instruction.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
22708
22709 /* Like negate_data_op, but for Thumb-2. */
22710
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate replacement immediates; which one is
     needed depends on the opcode, and either may be FAIL.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  /* Destination register field (bits 8..11) and opcode field.  */
  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 marks the TST form, which cannot be inverted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The required replacement immediate was itself not encodable, so
     the whole substitution fails.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Install the replacement opcode in the instruction.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22784
22785 /* Read a 32-bit thumb instruction from buf. */
22786 static unsigned long
22787 get_thumb32_insn (char * buf)
22788 {
22789 unsigned long insn;
22790 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22791 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22792
22793 return insn;
22794 }
22795
22796
22797 /* We usually want to set the low bit on the address of thumb function
22798 symbols. In particular .word foo - . should have the low bit set.
22799 Generic code tries to fold the difference of two symbols to
22800 a constant. Prevent this and force a relocation when the first symbols
22801 is a thumb function. */
22802
22803 bfd_boolean
22804 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22805 {
22806 if (op == O_subtract
22807 && l->X_op == O_symbol
22808 && r->X_op == O_symbol
22809 && THUMB_IS_FUNC (l->X_add_symbol))
22810 {
22811 l->X_op = O_subtract;
22812 l->X_op_symbol = r->X_add_symbol;
22813 l->X_add_number -= r->X_add_number;
22814 return TRUE;
22815 }
22816
22817 /* Process as normal. */
22818 return FALSE;
22819 }
22820
22821 /* Encode Thumb2 unconditional branches and calls. The encoding
22822 for the 2 are identical for the immediate values. */
22823
22824 static void
22825 encode_thumb2_b_bl_offset (char * buf, offsetT value)
22826 {
22827 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22828 offsetT newval;
22829 offsetT newval2;
22830 addressT S, I1, I2, lo, hi;
22831
22832 S = (value >> 24) & 0x01;
22833 I1 = (value >> 23) & 0x01;
22834 I2 = (value >> 22) & 0x01;
22835 hi = (value >> 12) & 0x3ff;
22836 lo = (value >> 1) & 0x7ff;
22837 newval = md_chars_to_number (buf, THUMB_SIZE);
22838 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22839 newval |= (S << 10) | hi;
22840 newval2 &= ~T2I1I2MASK;
22841 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22842 md_number_to_chars (buf, newval, THUMB_SIZE);
22843 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22844 }
22845
22846 void
22847 md_apply_fix (fixS * fixP,
22848 valueT * valP,
22849 segT seg)
22850 {
22851 offsetT value = * valP;
22852 offsetT newval;
22853 unsigned int newimm;
22854 unsigned long temp;
22855 int sign;
22856 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22857
22858 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22859
22860 /* Note whether this will delete the relocation. */
22861
22862 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22863 fixP->fx_done = 1;
22864
22865 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22866 consistency with the behaviour on 32-bit hosts. Remember value
22867 for emit_reloc. */
22868 value &= 0xffffffff;
22869 value ^= 0x80000000;
22870 value -= 0x80000000;
22871
22872 *valP = value;
22873 fixP->fx_addnumber = value;
22874
22875 /* Same treatment for fixP->fx_offset. */
22876 fixP->fx_offset &= 0xffffffff;
22877 fixP->fx_offset ^= 0x80000000;
22878 fixP->fx_offset -= 0x80000000;
22879
22880 switch (fixP->fx_r_type)
22881 {
22882 case BFD_RELOC_NONE:
22883 /* This will need to go in the object file. */
22884 fixP->fx_done = 0;
22885 break;
22886
22887 case BFD_RELOC_ARM_IMMEDIATE:
22888 /* We claim that this fixup has been processed here,
22889 even if in fact we generate an error because we do
22890 not have a reloc for it, so tc_gen_reloc will reject it. */
22891 fixP->fx_done = 1;
22892
22893 if (fixP->fx_addsy)
22894 {
22895 const char *msg = 0;
22896
22897 if (! S_IS_DEFINED (fixP->fx_addsy))
22898 msg = _("undefined symbol %s used as an immediate value");
22899 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22900 msg = _("symbol %s is in a different section");
22901 else if (S_IS_WEAK (fixP->fx_addsy))
22902 msg = _("symbol %s is weak and may be overridden later");
22903
22904 if (msg)
22905 {
22906 as_bad_where (fixP->fx_file, fixP->fx_line,
22907 msg, S_GET_NAME (fixP->fx_addsy));
22908 break;
22909 }
22910 }
22911
22912 temp = md_chars_to_number (buf, INSN_SIZE);
22913
22914 /* If the offset is negative, we should use encoding A2 for ADR. */
22915 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22916 newimm = negate_data_op (&temp, value);
22917 else
22918 {
22919 newimm = encode_arm_immediate (value);
22920
22921 /* If the instruction will fail, see if we can fix things up by
22922 changing the opcode. */
22923 if (newimm == (unsigned int) FAIL)
22924 newimm = negate_data_op (&temp, value);
22925 /* MOV accepts both ARM modified immediate (A1 encoding) and
22926 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22927 When disassembling, MOV is preferred when there is no encoding
22928 overlap. */
22929 if (newimm == (unsigned int) FAIL
22930 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
22931 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
22932 && !((temp >> SBIT_SHIFT) & 0x1)
22933 && value >= 0 && value <= 0xffff)
22934 {
22935 /* Clear bits[23:20] to change encoding from A1 to A2. */
22936 temp &= 0xff0fffff;
22937 /* Encoding high 4bits imm. Code below will encode the remaining
22938 low 12bits. */
22939 temp |= (value & 0x0000f000) << 4;
22940 newimm = value & 0x00000fff;
22941 }
22942 }
22943
22944 if (newimm == (unsigned int) FAIL)
22945 {
22946 as_bad_where (fixP->fx_file, fixP->fx_line,
22947 _("invalid constant (%lx) after fixup"),
22948 (unsigned long) value);
22949 break;
22950 }
22951
22952 newimm |= (temp & 0xfffff000);
22953 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22954 break;
22955
22956 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22957 {
22958 unsigned int highpart = 0;
22959 unsigned int newinsn = 0xe1a00000; /* nop. */
22960
22961 if (fixP->fx_addsy)
22962 {
22963 const char *msg = 0;
22964
22965 if (! S_IS_DEFINED (fixP->fx_addsy))
22966 msg = _("undefined symbol %s used as an immediate value");
22967 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22968 msg = _("symbol %s is in a different section");
22969 else if (S_IS_WEAK (fixP->fx_addsy))
22970 msg = _("symbol %s is weak and may be overridden later");
22971
22972 if (msg)
22973 {
22974 as_bad_where (fixP->fx_file, fixP->fx_line,
22975 msg, S_GET_NAME (fixP->fx_addsy));
22976 break;
22977 }
22978 }
22979
22980 newimm = encode_arm_immediate (value);
22981 temp = md_chars_to_number (buf, INSN_SIZE);
22982
22983 /* If the instruction will fail, see if we can fix things up by
22984 changing the opcode. */
22985 if (newimm == (unsigned int) FAIL
22986 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22987 {
22988 /* No ? OK - try using two ADD instructions to generate
22989 the value. */
22990 newimm = validate_immediate_twopart (value, & highpart);
22991
22992 /* Yes - then make sure that the second instruction is
22993 also an add. */
22994 if (newimm != (unsigned int) FAIL)
22995 newinsn = temp;
22996 /* Still No ? Try using a negated value. */
22997 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22998 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22999 /* Otherwise - give up. */
23000 else
23001 {
23002 as_bad_where (fixP->fx_file, fixP->fx_line,
23003 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23004 (long) value);
23005 break;
23006 }
23007
23008 /* Replace the first operand in the 2nd instruction (which
23009 is the PC) with the destination register. We have
23010 already added in the PC in the first instruction and we
23011 do not want to do it again. */
23012 newinsn &= ~ 0xf0000;
23013 newinsn |= ((newinsn & 0x0f000) << 4);
23014 }
23015
23016 newimm |= (temp & 0xfffff000);
23017 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23018
23019 highpart |= (newinsn & 0xfffff000);
23020 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23021 }
23022 break;
23023
23024 case BFD_RELOC_ARM_OFFSET_IMM:
23025 if (!fixP->fx_done && seg->use_rela_p)
23026 value = 0;
23027 /* Fall through. */
23028
23029 case BFD_RELOC_ARM_LITERAL:
23030 sign = value > 0;
23031
23032 if (value < 0)
23033 value = - value;
23034
23035 if (validate_offset_imm (value, 0) == FAIL)
23036 {
23037 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23038 as_bad_where (fixP->fx_file, fixP->fx_line,
23039 _("invalid literal constant: pool needs to be closer"));
23040 else
23041 as_bad_where (fixP->fx_file, fixP->fx_line,
23042 _("bad immediate value for offset (%ld)"),
23043 (long) value);
23044 break;
23045 }
23046
23047 newval = md_chars_to_number (buf, INSN_SIZE);
23048 if (value == 0)
23049 newval &= 0xfffff000;
23050 else
23051 {
23052 newval &= 0xff7ff000;
23053 newval |= value | (sign ? INDEX_UP : 0);
23054 }
23055 md_number_to_chars (buf, newval, INSN_SIZE);
23056 break;
23057
23058 case BFD_RELOC_ARM_OFFSET_IMM8:
23059 case BFD_RELOC_ARM_HWLITERAL:
23060 sign = value > 0;
23061
23062 if (value < 0)
23063 value = - value;
23064
23065 if (validate_offset_imm (value, 1) == FAIL)
23066 {
23067 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23068 as_bad_where (fixP->fx_file, fixP->fx_line,
23069 _("invalid literal constant: pool needs to be closer"));
23070 else
23071 as_bad_where (fixP->fx_file, fixP->fx_line,
23072 _("bad immediate value for 8-bit offset (%ld)"),
23073 (long) value);
23074 break;
23075 }
23076
23077 newval = md_chars_to_number (buf, INSN_SIZE);
23078 if (value == 0)
23079 newval &= 0xfffff0f0;
23080 else
23081 {
23082 newval &= 0xff7ff0f0;
23083 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23084 }
23085 md_number_to_chars (buf, newval, INSN_SIZE);
23086 break;
23087
23088 case BFD_RELOC_ARM_T32_OFFSET_U8:
23089 if (value < 0 || value > 1020 || value % 4 != 0)
23090 as_bad_where (fixP->fx_file, fixP->fx_line,
23091 _("bad immediate value for offset (%ld)"), (long) value);
23092 value /= 4;
23093
23094 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23095 newval |= value;
23096 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23097 break;
23098
23099 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23100 /* This is a complicated relocation used for all varieties of Thumb32
23101 load/store instruction with immediate offset:
23102
23103 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23104 *4, optional writeback(W)
23105 (doubleword load/store)
23106
23107 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23108 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23109 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23110 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23111 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23112
23113 Uppercase letters indicate bits that are already encoded at
23114 this point. Lowercase letters are our problem. For the
23115 second block of instructions, the secondary opcode nybble
23116 (bits 8..11) is present, and bit 23 is zero, even if this is
23117 a PC-relative operation. */
23118 newval = md_chars_to_number (buf, THUMB_SIZE);
23119 newval <<= 16;
23120 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23121
23122 if ((newval & 0xf0000000) == 0xe0000000)
23123 {
23124 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23125 if (value >= 0)
23126 newval |= (1 << 23);
23127 else
23128 value = -value;
23129 if (value % 4 != 0)
23130 {
23131 as_bad_where (fixP->fx_file, fixP->fx_line,
23132 _("offset not a multiple of 4"));
23133 break;
23134 }
23135 value /= 4;
23136 if (value > 0xff)
23137 {
23138 as_bad_where (fixP->fx_file, fixP->fx_line,
23139 _("offset out of range"));
23140 break;
23141 }
23142 newval &= ~0xff;
23143 }
23144 else if ((newval & 0x000f0000) == 0x000f0000)
23145 {
23146 /* PC-relative, 12-bit offset. */
23147 if (value >= 0)
23148 newval |= (1 << 23);
23149 else
23150 value = -value;
23151 if (value > 0xfff)
23152 {
23153 as_bad_where (fixP->fx_file, fixP->fx_line,
23154 _("offset out of range"));
23155 break;
23156 }
23157 newval &= ~0xfff;
23158 }
23159 else if ((newval & 0x00000100) == 0x00000100)
23160 {
23161 /* Writeback: 8-bit, +/- offset. */
23162 if (value >= 0)
23163 newval |= (1 << 9);
23164 else
23165 value = -value;
23166 if (value > 0xff)
23167 {
23168 as_bad_where (fixP->fx_file, fixP->fx_line,
23169 _("offset out of range"));
23170 break;
23171 }
23172 newval &= ~0xff;
23173 }
23174 else if ((newval & 0x00000f00) == 0x00000e00)
23175 {
23176 /* T-instruction: positive 8-bit offset. */
23177 if (value < 0 || value > 0xff)
23178 {
23179 as_bad_where (fixP->fx_file, fixP->fx_line,
23180 _("offset out of range"));
23181 break;
23182 }
23183 newval &= ~0xff;
23184 newval |= value;
23185 }
23186 else
23187 {
23188 /* Positive 12-bit or negative 8-bit offset. */
23189 int limit;
23190 if (value >= 0)
23191 {
23192 newval |= (1 << 23);
23193 limit = 0xfff;
23194 }
23195 else
23196 {
23197 value = -value;
23198 limit = 0xff;
23199 }
23200 if (value > limit)
23201 {
23202 as_bad_where (fixP->fx_file, fixP->fx_line,
23203 _("offset out of range"));
23204 break;
23205 }
23206 newval &= ~limit;
23207 }
23208
23209 newval |= value;
23210 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23211 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23212 break;
23213
23214 case BFD_RELOC_ARM_SHIFT_IMM:
23215 newval = md_chars_to_number (buf, INSN_SIZE);
23216 if (((unsigned long) value) > 32
23217 || (value == 32
23218 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23219 {
23220 as_bad_where (fixP->fx_file, fixP->fx_line,
23221 _("shift expression is too large"));
23222 break;
23223 }
23224
23225 if (value == 0)
23226 /* Shifts of zero must be done as lsl. */
23227 newval &= ~0x60;
23228 else if (value == 32)
23229 value = 0;
23230 newval &= 0xfffff07f;
23231 newval |= (value & 0x1f) << 7;
23232 md_number_to_chars (buf, newval, INSN_SIZE);
23233 break;
23234
23235 case BFD_RELOC_ARM_T32_IMMEDIATE:
23236 case BFD_RELOC_ARM_T32_ADD_IMM:
23237 case BFD_RELOC_ARM_T32_IMM12:
23238 case BFD_RELOC_ARM_T32_ADD_PC12:
23239 /* We claim that this fixup has been processed here,
23240 even if in fact we generate an error because we do
23241 not have a reloc for it, so tc_gen_reloc will reject it. */
23242 fixP->fx_done = 1;
23243
23244 if (fixP->fx_addsy
23245 && ! S_IS_DEFINED (fixP->fx_addsy))
23246 {
23247 as_bad_where (fixP->fx_file, fixP->fx_line,
23248 _("undefined symbol %s used as an immediate value"),
23249 S_GET_NAME (fixP->fx_addsy));
23250 break;
23251 }
23252
23253 newval = md_chars_to_number (buf, THUMB_SIZE);
23254 newval <<= 16;
23255 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23256
23257 newimm = FAIL;
23258 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23259 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23260 Thumb2 modified immediate encoding (T2). */
23261 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23262 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23263 {
23264 newimm = encode_thumb32_immediate (value);
23265 if (newimm == (unsigned int) FAIL)
23266 newimm = thumb32_negate_data_op (&newval, value);
23267 }
23268 if (newimm == (unsigned int) FAIL)
23269 {
23270 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23271 {
23272 /* Turn add/sum into addw/subw. */
23273 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23274 newval = (newval & 0xfeffffff) | 0x02000000;
23275 /* No flat 12-bit imm encoding for addsw/subsw. */
23276 if ((newval & 0x00100000) == 0)
23277 {
23278 /* 12 bit immediate for addw/subw. */
23279 if (value < 0)
23280 {
23281 value = -value;
23282 newval ^= 0x00a00000;
23283 }
23284 if (value > 0xfff)
23285 newimm = (unsigned int) FAIL;
23286 else
23287 newimm = value;
23288 }
23289 }
23290 else
23291 {
23292 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23293 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23294 disassembling, MOV is preferred when there is no encoding
23295 overlap.
23296 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23297 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23298 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23299 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23300 && value >= 0 && value <=0xffff)
23301 {
23302 /* Toggle bit[25] to change encoding from T2 to T3. */
23303 newval ^= 1 << 25;
23304 /* Clear bits[19:16]. */
23305 newval &= 0xfff0ffff;
23306 /* Encoding high 4bits imm. Code below will encode the
23307 remaining low 12bits. */
23308 newval |= (value & 0x0000f000) << 4;
23309 newimm = value & 0x00000fff;
23310 }
23311 }
23312 }
23313
23314 if (newimm == (unsigned int)FAIL)
23315 {
23316 as_bad_where (fixP->fx_file, fixP->fx_line,
23317 _("invalid constant (%lx) after fixup"),
23318 (unsigned long) value);
23319 break;
23320 }
23321
23322 newval |= (newimm & 0x800) << 15;
23323 newval |= (newimm & 0x700) << 4;
23324 newval |= (newimm & 0x0ff);
23325
23326 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23327 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23328 break;
23329
23330 case BFD_RELOC_ARM_SMC:
23331 if (((unsigned long) value) > 0xffff)
23332 as_bad_where (fixP->fx_file, fixP->fx_line,
23333 _("invalid smc expression"));
23334 newval = md_chars_to_number (buf, INSN_SIZE);
23335 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23336 md_number_to_chars (buf, newval, INSN_SIZE);
23337 break;
23338
23339 case BFD_RELOC_ARM_HVC:
23340 if (((unsigned long) value) > 0xffff)
23341 as_bad_where (fixP->fx_file, fixP->fx_line,
23342 _("invalid hvc expression"));
23343 newval = md_chars_to_number (buf, INSN_SIZE);
23344 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23345 md_number_to_chars (buf, newval, INSN_SIZE);
23346 break;
23347
23348 case BFD_RELOC_ARM_SWI:
23349 if (fixP->tc_fix_data != 0)
23350 {
23351 if (((unsigned long) value) > 0xff)
23352 as_bad_where (fixP->fx_file, fixP->fx_line,
23353 _("invalid swi expression"));
23354 newval = md_chars_to_number (buf, THUMB_SIZE);
23355 newval |= value;
23356 md_number_to_chars (buf, newval, THUMB_SIZE);
23357 }
23358 else
23359 {
23360 if (((unsigned long) value) > 0x00ffffff)
23361 as_bad_where (fixP->fx_file, fixP->fx_line,
23362 _("invalid swi expression"));
23363 newval = md_chars_to_number (buf, INSN_SIZE);
23364 newval |= value;
23365 md_number_to_chars (buf, newval, INSN_SIZE);
23366 }
23367 break;
23368
23369 case BFD_RELOC_ARM_MULTI:
23370 if (((unsigned long) value) > 0xffff)
23371 as_bad_where (fixP->fx_file, fixP->fx_line,
23372 _("invalid expression in load/store multiple"));
23373 newval = value | md_chars_to_number (buf, INSN_SIZE);
23374 md_number_to_chars (buf, newval, INSN_SIZE);
23375 break;
23376
23377 #ifdef OBJ_ELF
23378 case BFD_RELOC_ARM_PCREL_CALL:
23379
23380 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23381 && fixP->fx_addsy
23382 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23383 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23384 && THUMB_IS_FUNC (fixP->fx_addsy))
23385 /* Flip the bl to blx. This is a simple flip
23386 bit here because we generate PCREL_CALL for
23387 unconditional bls. */
23388 {
23389 newval = md_chars_to_number (buf, INSN_SIZE);
23390 newval = newval | 0x10000000;
23391 md_number_to_chars (buf, newval, INSN_SIZE);
23392 temp = 1;
23393 fixP->fx_done = 1;
23394 }
23395 else
23396 temp = 3;
23397 goto arm_branch_common;
23398
23399 case BFD_RELOC_ARM_PCREL_JUMP:
23400 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23401 && fixP->fx_addsy
23402 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23403 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23404 && THUMB_IS_FUNC (fixP->fx_addsy))
23405 {
23406 /* This would map to a bl<cond>, b<cond>,
23407 b<always> to a Thumb function. We
23408 need to force a relocation for this particular
23409 case. */
23410 newval = md_chars_to_number (buf, INSN_SIZE);
23411 fixP->fx_done = 0;
23412 }
23413 /* Fall through. */
23414
23415 case BFD_RELOC_ARM_PLT32:
23416 #endif
23417 case BFD_RELOC_ARM_PCREL_BRANCH:
23418 temp = 3;
23419 goto arm_branch_common;
23420
23421 case BFD_RELOC_ARM_PCREL_BLX:
23422
23423 temp = 1;
23424 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23425 && fixP->fx_addsy
23426 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23427 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23428 && ARM_IS_FUNC (fixP->fx_addsy))
23429 {
23430 /* Flip the blx to a bl and warn. */
23431 const char *name = S_GET_NAME (fixP->fx_addsy);
23432 newval = 0xeb000000;
23433 as_warn_where (fixP->fx_file, fixP->fx_line,
23434 _("blx to '%s' an ARM ISA state function changed to bl"),
23435 name);
23436 md_number_to_chars (buf, newval, INSN_SIZE);
23437 temp = 3;
23438 fixP->fx_done = 1;
23439 }
23440
23441 #ifdef OBJ_ELF
23442 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23443 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23444 #endif
23445
23446 arm_branch_common:
23447 /* We are going to store value (shifted right by two) in the
23448 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23449 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23450 	 also be clear.  */
23451 if (value & temp)
23452 as_bad_where (fixP->fx_file, fixP->fx_line,
23453 _("misaligned branch destination"));
23454 if ((value & (offsetT)0xfe000000) != (offsetT)0
23455 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23456 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23457
23458 if (fixP->fx_done || !seg->use_rela_p)
23459 {
23460 newval = md_chars_to_number (buf, INSN_SIZE);
23461 newval |= (value >> 2) & 0x00ffffff;
23462 /* Set the H bit on BLX instructions. */
23463 if (temp == 1)
23464 {
23465 if (value & 2)
23466 newval |= 0x01000000;
23467 else
23468 newval &= ~0x01000000;
23469 }
23470 md_number_to_chars (buf, newval, INSN_SIZE);
23471 }
23472 break;
23473
23474 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23475 /* CBZ can only branch forward. */
23476
23477 /* Attempts to use CBZ to branch to the next instruction
23478 (which, strictly speaking, are prohibited) will be turned into
23479 no-ops.
23480
23481 FIXME: It may be better to remove the instruction completely and
23482 perform relaxation. */
23483 if (value == -2)
23484 {
23485 newval = md_chars_to_number (buf, THUMB_SIZE);
23486 newval = 0xbf00; /* NOP encoding T1 */
23487 md_number_to_chars (buf, newval, THUMB_SIZE);
23488 }
23489 else
23490 {
23491 if (value & ~0x7e)
23492 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23493
23494 if (fixP->fx_done || !seg->use_rela_p)
23495 {
23496 newval = md_chars_to_number (buf, THUMB_SIZE);
23497 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23498 md_number_to_chars (buf, newval, THUMB_SIZE);
23499 }
23500 }
23501 break;
23502
23503 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23504 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23505 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23506
23507 if (fixP->fx_done || !seg->use_rela_p)
23508 {
23509 newval = md_chars_to_number (buf, THUMB_SIZE);
23510 newval |= (value & 0x1ff) >> 1;
23511 md_number_to_chars (buf, newval, THUMB_SIZE);
23512 }
23513 break;
23514
23515 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23516 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23517 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23518
23519 if (fixP->fx_done || !seg->use_rela_p)
23520 {
23521 newval = md_chars_to_number (buf, THUMB_SIZE);
23522 newval |= (value & 0xfff) >> 1;
23523 md_number_to_chars (buf, newval, THUMB_SIZE);
23524 }
23525 break;
23526
23527 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23528 if (fixP->fx_addsy
23529 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23530 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23531 && ARM_IS_FUNC (fixP->fx_addsy)
23532 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23533 {
23534 /* Force a relocation for a branch 20 bits wide. */
23535 fixP->fx_done = 0;
23536 }
23537 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23538 as_bad_where (fixP->fx_file, fixP->fx_line,
23539 _("conditional branch out of range"));
23540
23541 if (fixP->fx_done || !seg->use_rela_p)
23542 {
23543 offsetT newval2;
23544 addressT S, J1, J2, lo, hi;
23545
23546 S = (value & 0x00100000) >> 20;
23547 J2 = (value & 0x00080000) >> 19;
23548 J1 = (value & 0x00040000) >> 18;
23549 hi = (value & 0x0003f000) >> 12;
23550 lo = (value & 0x00000ffe) >> 1;
23551
23552 newval = md_chars_to_number (buf, THUMB_SIZE);
23553 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23554 newval |= (S << 10) | hi;
23555 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23556 md_number_to_chars (buf, newval, THUMB_SIZE);
23557 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23558 }
23559 break;
23560
23561 case BFD_RELOC_THUMB_PCREL_BLX:
23562 /* If there is a blx from a thumb state function to
23563 another thumb function flip this to a bl and warn
23564 about it. */
23565
23566 if (fixP->fx_addsy
23567 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23568 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23569 && THUMB_IS_FUNC (fixP->fx_addsy))
23570 {
23571 const char *name = S_GET_NAME (fixP->fx_addsy);
23572 as_warn_where (fixP->fx_file, fixP->fx_line,
23573 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23574 name);
23575 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23576 newval = newval | 0x1000;
23577 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23578 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23579 fixP->fx_done = 1;
23580 }
23581
23582
23583 goto thumb_bl_common;
23584
23585 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23586 /* A bl from Thumb state ISA to an internal ARM state function
23587 is converted to a blx. */
23588 if (fixP->fx_addsy
23589 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23590 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23591 && ARM_IS_FUNC (fixP->fx_addsy)
23592 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23593 {
23594 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23595 newval = newval & ~0x1000;
23596 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23597 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23598 fixP->fx_done = 1;
23599 }
23600
23601 thumb_bl_common:
23602
23603 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23604 /* For a BLX instruction, make sure that the relocation is rounded up
23605 to a word boundary. This follows the semantics of the instruction
23606 which specifies that bit 1 of the target address will come from bit
23607 1 of the base address. */
23608 value = (value + 3) & ~ 3;
23609
23610 #ifdef OBJ_ELF
23611 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23612 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23613 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23614 #endif
23615
23616 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23617 {
23618 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23619 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23620 else if ((value & ~0x1ffffff)
23621 && ((value & ~0x1ffffff) != ~0x1ffffff))
23622 as_bad_where (fixP->fx_file, fixP->fx_line,
23623 _("Thumb2 branch out of range"));
23624 }
23625
23626 if (fixP->fx_done || !seg->use_rela_p)
23627 encode_thumb2_b_bl_offset (buf, value);
23628
23629 break;
23630
23631 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23632 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23633 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23634
23635 if (fixP->fx_done || !seg->use_rela_p)
23636 encode_thumb2_b_bl_offset (buf, value);
23637
23638 break;
23639
23640 case BFD_RELOC_8:
23641 if (fixP->fx_done || !seg->use_rela_p)
23642 *buf = value;
23643 break;
23644
23645 case BFD_RELOC_16:
23646 if (fixP->fx_done || !seg->use_rela_p)
23647 md_number_to_chars (buf, value, 2);
23648 break;
23649
23650 #ifdef OBJ_ELF
23651 case BFD_RELOC_ARM_TLS_CALL:
23652 case BFD_RELOC_ARM_THM_TLS_CALL:
23653 case BFD_RELOC_ARM_TLS_DESCSEQ:
23654 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23655 case BFD_RELOC_ARM_TLS_GOTDESC:
23656 case BFD_RELOC_ARM_TLS_GD32:
23657 case BFD_RELOC_ARM_TLS_LE32:
23658 case BFD_RELOC_ARM_TLS_IE32:
23659 case BFD_RELOC_ARM_TLS_LDM32:
23660 case BFD_RELOC_ARM_TLS_LDO32:
23661 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23662 break;
23663
23664 case BFD_RELOC_ARM_GOT32:
23665 case BFD_RELOC_ARM_GOTOFF:
23666 break;
23667
23668 case BFD_RELOC_ARM_GOT_PREL:
23669 if (fixP->fx_done || !seg->use_rela_p)
23670 md_number_to_chars (buf, value, 4);
23671 break;
23672
23673 case BFD_RELOC_ARM_TARGET2:
23674 /* TARGET2 is not partial-inplace, so we need to write the
23675 addend here for REL targets, because it won't be written out
23676 during reloc processing later. */
23677 if (fixP->fx_done || !seg->use_rela_p)
23678 md_number_to_chars (buf, fixP->fx_offset, 4);
23679 break;
23680 #endif
23681
23682 case BFD_RELOC_RVA:
23683 case BFD_RELOC_32:
23684 case BFD_RELOC_ARM_TARGET1:
23685 case BFD_RELOC_ARM_ROSEGREL32:
23686 case BFD_RELOC_ARM_SBREL32:
23687 case BFD_RELOC_32_PCREL:
23688 #ifdef TE_PE
23689 case BFD_RELOC_32_SECREL:
23690 #endif
23691 if (fixP->fx_done || !seg->use_rela_p)
23692 #ifdef TE_WINCE
23693 /* For WinCE we only do this for pcrel fixups. */
23694 if (fixP->fx_done || fixP->fx_pcrel)
23695 #endif
23696 md_number_to_chars (buf, value, 4);
23697 break;
23698
23699 #ifdef OBJ_ELF
23700 case BFD_RELOC_ARM_PREL31:
23701 if (fixP->fx_done || !seg->use_rela_p)
23702 {
23703 newval = md_chars_to_number (buf, 4) & 0x80000000;
23704 if ((value ^ (value >> 1)) & 0x40000000)
23705 {
23706 as_bad_where (fixP->fx_file, fixP->fx_line,
23707 _("rel31 relocation overflow"));
23708 }
23709 newval |= value & 0x7fffffff;
23710 md_number_to_chars (buf, newval, 4);
23711 }
23712 break;
23713 #endif
23714
23715 case BFD_RELOC_ARM_CP_OFF_IMM:
23716 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23717 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23718 newval = md_chars_to_number (buf, INSN_SIZE);
23719 else
23720 newval = get_thumb32_insn (buf);
23721 if ((newval & 0x0f200f00) == 0x0d000900)
23722 {
23723 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23724 has permitted values that are multiples of 2, in the range 0
23725 to 510. */
23726 if (value < -510 || value > 510 || (value & 1))
23727 as_bad_where (fixP->fx_file, fixP->fx_line,
23728 _("co-processor offset out of range"));
23729 }
23730 else if (value < -1023 || value > 1023 || (value & 3))
23731 as_bad_where (fixP->fx_file, fixP->fx_line,
23732 _("co-processor offset out of range"));
23733 cp_off_common:
23734 sign = value > 0;
23735 if (value < 0)
23736 value = -value;
23737 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23738 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23739 newval = md_chars_to_number (buf, INSN_SIZE);
23740 else
23741 newval = get_thumb32_insn (buf);
23742 if (value == 0)
23743 newval &= 0xffffff00;
23744 else
23745 {
23746 newval &= 0xff7fff00;
23747 if ((newval & 0x0f200f00) == 0x0d000900)
23748 {
23749 /* This is a fp16 vstr/vldr.
23750
23751 It requires the immediate offset in the instruction is shifted
23752 left by 1 to be a half-word offset.
23753
23754 Here, left shift by 1 first, and later right shift by 2
23755 should get the right offset. */
23756 value <<= 1;
23757 }
23758 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23759 }
23760 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23761 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23762 md_number_to_chars (buf, newval, INSN_SIZE);
23763 else
23764 put_thumb32_insn (buf, newval);
23765 break;
23766
23767 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23768 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23769 if (value < -255 || value > 255)
23770 as_bad_where (fixP->fx_file, fixP->fx_line,
23771 _("co-processor offset out of range"));
23772 value *= 4;
23773 goto cp_off_common;
23774
23775 case BFD_RELOC_ARM_THUMB_OFFSET:
23776 newval = md_chars_to_number (buf, THUMB_SIZE);
23777 /* Exactly what ranges, and where the offset is inserted depends
23778 on the type of instruction, we can establish this from the
23779 top 4 bits. */
23780 switch (newval >> 12)
23781 {
23782 case 4: /* PC load. */
23783 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23784 forced to zero for these loads; md_pcrel_from has already
23785 compensated for this. */
23786 if (value & 3)
23787 as_bad_where (fixP->fx_file, fixP->fx_line,
23788 _("invalid offset, target not word aligned (0x%08lX)"),
23789 (((unsigned long) fixP->fx_frag->fr_address
23790 + (unsigned long) fixP->fx_where) & ~3)
23791 + (unsigned long) value);
23792
23793 if (value & ~0x3fc)
23794 as_bad_where (fixP->fx_file, fixP->fx_line,
23795 _("invalid offset, value too big (0x%08lX)"),
23796 (long) value);
23797
23798 newval |= value >> 2;
23799 break;
23800
23801 case 9: /* SP load/store. */
23802 if (value & ~0x3fc)
23803 as_bad_where (fixP->fx_file, fixP->fx_line,
23804 _("invalid offset, value too big (0x%08lX)"),
23805 (long) value);
23806 newval |= value >> 2;
23807 break;
23808
23809 case 6: /* Word load/store. */
23810 if (value & ~0x7c)
23811 as_bad_where (fixP->fx_file, fixP->fx_line,
23812 _("invalid offset, value too big (0x%08lX)"),
23813 (long) value);
23814 newval |= value << 4; /* 6 - 2. */
23815 break;
23816
23817 case 7: /* Byte load/store. */
23818 if (value & ~0x1f)
23819 as_bad_where (fixP->fx_file, fixP->fx_line,
23820 _("invalid offset, value too big (0x%08lX)"),
23821 (long) value);
23822 newval |= value << 6;
23823 break;
23824
23825 case 8: /* Halfword load/store. */
23826 if (value & ~0x3e)
23827 as_bad_where (fixP->fx_file, fixP->fx_line,
23828 _("invalid offset, value too big (0x%08lX)"),
23829 (long) value);
23830 newval |= value << 5; /* 6 - 1. */
23831 break;
23832
23833 default:
23834 as_bad_where (fixP->fx_file, fixP->fx_line,
23835 "Unable to process relocation for thumb opcode: %lx",
23836 (unsigned long) newval);
23837 break;
23838 }
23839 md_number_to_chars (buf, newval, THUMB_SIZE);
23840 break;
23841
23842 case BFD_RELOC_ARM_THUMB_ADD:
23843 /* This is a complicated relocation, since we use it for all of
23844 the following immediate relocations:
23845
23846 3bit ADD/SUB
23847 8bit ADD/SUB
23848 9bit ADD/SUB SP word-aligned
23849 10bit ADD PC/SP word-aligned
23850
23851 The type of instruction being processed is encoded in the
23852 instruction field:
23853
23854 0x8000 SUB
23855 0x00F0 Rd
23856 0x000F Rs
23857 */
23858 newval = md_chars_to_number (buf, THUMB_SIZE);
23859 {
23860 int rd = (newval >> 4) & 0xf;
23861 int rs = newval & 0xf;
23862 int subtract = !!(newval & 0x8000);
23863
23864 /* Check for HI regs, only very restricted cases allowed:
23865 Adjusting SP, and using PC or SP to get an address. */
23866 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23867 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23868 as_bad_where (fixP->fx_file, fixP->fx_line,
23869 _("invalid Hi register with immediate"));
23870
23871 /* If value is negative, choose the opposite instruction. */
23872 if (value < 0)
23873 {
23874 value = -value;
23875 subtract = !subtract;
23876 if (value < 0)
23877 as_bad_where (fixP->fx_file, fixP->fx_line,
23878 _("immediate value out of range"));
23879 }
23880
23881 if (rd == REG_SP)
23882 {
23883 if (value & ~0x1fc)
23884 as_bad_where (fixP->fx_file, fixP->fx_line,
23885 _("invalid immediate for stack address calculation"));
23886 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23887 newval |= value >> 2;
23888 }
23889 else if (rs == REG_PC || rs == REG_SP)
23890 {
23891 /* PR gas/18541. If the addition is for a defined symbol
23892 within range of an ADR instruction then accept it. */
23893 if (subtract
23894 && value == 4
23895 && fixP->fx_addsy != NULL)
23896 {
23897 subtract = 0;
23898
23899 if (! S_IS_DEFINED (fixP->fx_addsy)
23900 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23901 || S_IS_WEAK (fixP->fx_addsy))
23902 {
23903 as_bad_where (fixP->fx_file, fixP->fx_line,
23904 _("address calculation needs a strongly defined nearby symbol"));
23905 }
23906 else
23907 {
23908 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23909
23910 /* Round up to the next 4-byte boundary. */
23911 if (v & 3)
23912 v = (v + 3) & ~ 3;
23913 else
23914 v += 4;
23915 v = S_GET_VALUE (fixP->fx_addsy) - v;
23916
23917 if (v & ~0x3fc)
23918 {
23919 as_bad_where (fixP->fx_file, fixP->fx_line,
23920 _("symbol too far away"));
23921 }
23922 else
23923 {
23924 fixP->fx_done = 1;
23925 value = v;
23926 }
23927 }
23928 }
23929
23930 if (subtract || value & ~0x3fc)
23931 as_bad_where (fixP->fx_file, fixP->fx_line,
23932 _("invalid immediate for address calculation (value = 0x%08lX)"),
23933 (unsigned long) (subtract ? - value : value));
23934 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23935 newval |= rd << 8;
23936 newval |= value >> 2;
23937 }
23938 else if (rs == rd)
23939 {
23940 if (value & ~0xff)
23941 as_bad_where (fixP->fx_file, fixP->fx_line,
23942 _("immediate value out of range"));
23943 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23944 newval |= (rd << 8) | value;
23945 }
23946 else
23947 {
23948 if (value & ~0x7)
23949 as_bad_where (fixP->fx_file, fixP->fx_line,
23950 _("immediate value out of range"));
23951 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23952 newval |= rd | (rs << 3) | (value << 6);
23953 }
23954 }
23955 md_number_to_chars (buf, newval, THUMB_SIZE);
23956 break;
23957
23958 case BFD_RELOC_ARM_THUMB_IMM:
23959 newval = md_chars_to_number (buf, THUMB_SIZE);
23960 if (value < 0 || value > 255)
23961 as_bad_where (fixP->fx_file, fixP->fx_line,
23962 _("invalid immediate: %ld is out of range"),
23963 (long) value);
23964 newval |= value;
23965 md_number_to_chars (buf, newval, THUMB_SIZE);
23966 break;
23967
23968 case BFD_RELOC_ARM_THUMB_SHIFT:
23969 /* 5bit shift value (0..32). LSL cannot take 32. */
23970 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23971 temp = newval & 0xf800;
23972 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23973 as_bad_where (fixP->fx_file, fixP->fx_line,
23974 _("invalid shift value: %ld"), (long) value);
23975 /* Shifts of zero must be encoded as LSL. */
23976 if (value == 0)
23977 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23978 /* Shifts of 32 are encoded as zero. */
23979 else if (value == 32)
23980 value = 0;
23981 newval |= value << 6;
23982 md_number_to_chars (buf, newval, THUMB_SIZE);
23983 break;
23984
23985 case BFD_RELOC_VTABLE_INHERIT:
23986 case BFD_RELOC_VTABLE_ENTRY:
23987 fixP->fx_done = 0;
23988 return;
23989
23990 case BFD_RELOC_ARM_MOVW:
23991 case BFD_RELOC_ARM_MOVT:
23992 case BFD_RELOC_ARM_THUMB_MOVW:
23993 case BFD_RELOC_ARM_THUMB_MOVT:
23994 if (fixP->fx_done || !seg->use_rela_p)
23995 {
23996 /* REL format relocations are limited to a 16-bit addend. */
23997 if (!fixP->fx_done)
23998 {
23999 if (value < -0x8000 || value > 0x7fff)
24000 as_bad_where (fixP->fx_file, fixP->fx_line,
24001 _("offset out of range"));
24002 }
24003 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24004 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24005 {
24006 value >>= 16;
24007 }
24008
24009 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24010 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24011 {
24012 newval = get_thumb32_insn (buf);
24013 newval &= 0xfbf08f00;
24014 newval |= (value & 0xf000) << 4;
24015 newval |= (value & 0x0800) << 15;
24016 newval |= (value & 0x0700) << 4;
24017 newval |= (value & 0x00ff);
24018 put_thumb32_insn (buf, newval);
24019 }
24020 else
24021 {
24022 newval = md_chars_to_number (buf, 4);
24023 newval &= 0xfff0f000;
24024 newval |= value & 0x0fff;
24025 newval |= (value & 0xf000) << 4;
24026 md_number_to_chars (buf, newval, 4);
24027 }
24028 }
24029 return;
24030
24031 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24032 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24033 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24034 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24035 gas_assert (!fixP->fx_done);
24036 {
24037 bfd_vma insn;
24038 bfd_boolean is_mov;
24039 bfd_vma encoded_addend = value;
24040
24041 /* Check that addend can be encoded in instruction. */
24042 if (!seg->use_rela_p && (value < 0 || value > 255))
24043 as_bad_where (fixP->fx_file, fixP->fx_line,
24044 _("the offset 0x%08lX is not representable"),
24045 (unsigned long) encoded_addend);
24046
24047 /* Extract the instruction. */
24048 insn = md_chars_to_number (buf, THUMB_SIZE);
24049 is_mov = (insn & 0xf800) == 0x2000;
24050
24051 /* Encode insn. */
24052 if (is_mov)
24053 {
24054 if (!seg->use_rela_p)
24055 insn |= encoded_addend;
24056 }
24057 else
24058 {
24059 int rd, rs;
24060
24061 /* Extract the instruction. */
24062 /* Encoding is the following
24063 0x8000 SUB
24064 0x00F0 Rd
24065 0x000F Rs
24066 */
24067 /* The following conditions must be true :
24068 - ADD
24069 - Rd == Rs
24070 - Rd <= 7
24071 */
24072 rd = (insn >> 4) & 0xf;
24073 rs = insn & 0xf;
24074 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24075 as_bad_where (fixP->fx_file, fixP->fx_line,
24076 _("Unable to process relocation for thumb opcode: %lx"),
24077 (unsigned long) insn);
24078
24079 /* Encode as ADD immediate8 thumb 1 code. */
24080 insn = 0x3000 | (rd << 8);
24081
24082 /* Place the encoded addend into the first 8 bits of the
24083 instruction. */
24084 if (!seg->use_rela_p)
24085 insn |= encoded_addend;
24086 }
24087
24088 /* Update the instruction. */
24089 md_number_to_chars (buf, insn, THUMB_SIZE);
24090 }
24091 break;
24092
24093 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24094 case BFD_RELOC_ARM_ALU_PC_G0:
24095 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24096 case BFD_RELOC_ARM_ALU_PC_G1:
24097 case BFD_RELOC_ARM_ALU_PC_G2:
24098 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24099 case BFD_RELOC_ARM_ALU_SB_G0:
24100 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24101 case BFD_RELOC_ARM_ALU_SB_G1:
24102 case BFD_RELOC_ARM_ALU_SB_G2:
24103 gas_assert (!fixP->fx_done);
24104 if (!seg->use_rela_p)
24105 {
24106 bfd_vma insn;
24107 bfd_vma encoded_addend;
24108 bfd_vma addend_abs = abs (value);
24109
24110 /* Check that the absolute value of the addend can be
24111 expressed as an 8-bit constant plus a rotation. */
24112 encoded_addend = encode_arm_immediate (addend_abs);
24113 if (encoded_addend == (unsigned int) FAIL)
24114 as_bad_where (fixP->fx_file, fixP->fx_line,
24115 _("the offset 0x%08lX is not representable"),
24116 (unsigned long) addend_abs);
24117
24118 /* Extract the instruction. */
24119 insn = md_chars_to_number (buf, INSN_SIZE);
24120
24121 /* If the addend is positive, use an ADD instruction.
24122 Otherwise use a SUB. Take care not to destroy the S bit. */
24123 insn &= 0xff1fffff;
24124 if (value < 0)
24125 insn |= 1 << 22;
24126 else
24127 insn |= 1 << 23;
24128
24129 /* Place the encoded addend into the first 12 bits of the
24130 instruction. */
24131 insn &= 0xfffff000;
24132 insn |= encoded_addend;
24133
24134 /* Update the instruction. */
24135 md_number_to_chars (buf, insn, INSN_SIZE);
24136 }
24137 break;
24138
24139 case BFD_RELOC_ARM_LDR_PC_G0:
24140 case BFD_RELOC_ARM_LDR_PC_G1:
24141 case BFD_RELOC_ARM_LDR_PC_G2:
24142 case BFD_RELOC_ARM_LDR_SB_G0:
24143 case BFD_RELOC_ARM_LDR_SB_G1:
24144 case BFD_RELOC_ARM_LDR_SB_G2:
24145 gas_assert (!fixP->fx_done);
24146 if (!seg->use_rela_p)
24147 {
24148 bfd_vma insn;
24149 bfd_vma addend_abs = abs (value);
24150
24151 /* Check that the absolute value of the addend can be
24152 encoded in 12 bits. */
24153 if (addend_abs >= 0x1000)
24154 as_bad_where (fixP->fx_file, fixP->fx_line,
24155 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24156 (unsigned long) addend_abs);
24157
24158 /* Extract the instruction. */
24159 insn = md_chars_to_number (buf, INSN_SIZE);
24160
24161 /* If the addend is negative, clear bit 23 of the instruction.
24162 Otherwise set it. */
24163 if (value < 0)
24164 insn &= ~(1 << 23);
24165 else
24166 insn |= 1 << 23;
24167
24168 /* Place the absolute value of the addend into the first 12 bits
24169 of the instruction. */
24170 insn &= 0xfffff000;
24171 insn |= addend_abs;
24172
24173 /* Update the instruction. */
24174 md_number_to_chars (buf, insn, INSN_SIZE);
24175 }
24176 break;
24177
24178 case BFD_RELOC_ARM_LDRS_PC_G0:
24179 case BFD_RELOC_ARM_LDRS_PC_G1:
24180 case BFD_RELOC_ARM_LDRS_PC_G2:
24181 case BFD_RELOC_ARM_LDRS_SB_G0:
24182 case BFD_RELOC_ARM_LDRS_SB_G1:
24183 case BFD_RELOC_ARM_LDRS_SB_G2:
24184 gas_assert (!fixP->fx_done);
24185 if (!seg->use_rela_p)
24186 {
24187 bfd_vma insn;
24188 bfd_vma addend_abs = abs (value);
24189
24190 /* Check that the absolute value of the addend can be
24191 encoded in 8 bits. */
24192 if (addend_abs >= 0x100)
24193 as_bad_where (fixP->fx_file, fixP->fx_line,
24194 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24195 (unsigned long) addend_abs);
24196
24197 /* Extract the instruction. */
24198 insn = md_chars_to_number (buf, INSN_SIZE);
24199
24200 /* If the addend is negative, clear bit 23 of the instruction.
24201 Otherwise set it. */
24202 if (value < 0)
24203 insn &= ~(1 << 23);
24204 else
24205 insn |= 1 << 23;
24206
24207 /* Place the first four bits of the absolute value of the addend
24208 into the first 4 bits of the instruction, and the remaining
24209 four into bits 8 .. 11. */
24210 insn &= 0xfffff0f0;
24211 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24212
24213 /* Update the instruction. */
24214 md_number_to_chars (buf, insn, INSN_SIZE);
24215 }
24216 break;
24217
24218 case BFD_RELOC_ARM_LDC_PC_G0:
24219 case BFD_RELOC_ARM_LDC_PC_G1:
24220 case BFD_RELOC_ARM_LDC_PC_G2:
24221 case BFD_RELOC_ARM_LDC_SB_G0:
24222 case BFD_RELOC_ARM_LDC_SB_G1:
24223 case BFD_RELOC_ARM_LDC_SB_G2:
24224 gas_assert (!fixP->fx_done);
24225 if (!seg->use_rela_p)
24226 {
24227 bfd_vma insn;
24228 bfd_vma addend_abs = abs (value);
24229
24230 /* Check that the absolute value of the addend is a multiple of
24231 four and, when divided by four, fits in 8 bits. */
24232 if (addend_abs & 0x3)
24233 as_bad_where (fixP->fx_file, fixP->fx_line,
24234 _("bad offset 0x%08lX (must be word-aligned)"),
24235 (unsigned long) addend_abs);
24236
24237 if ((addend_abs >> 2) > 0xff)
24238 as_bad_where (fixP->fx_file, fixP->fx_line,
24239 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24240 (unsigned long) addend_abs);
24241
24242 /* Extract the instruction. */
24243 insn = md_chars_to_number (buf, INSN_SIZE);
24244
24245 /* If the addend is negative, clear bit 23 of the instruction.
24246 Otherwise set it. */
24247 if (value < 0)
24248 insn &= ~(1 << 23);
24249 else
24250 insn |= 1 << 23;
24251
24252 /* Place the addend (divided by four) into the first eight
24253 bits of the instruction. */
24254 insn &= 0xfffffff0;
24255 insn |= addend_abs >> 2;
24256
24257 /* Update the instruction. */
24258 md_number_to_chars (buf, insn, INSN_SIZE);
24259 }
24260 break;
24261
24262 case BFD_RELOC_ARM_V4BX:
24263 /* This will need to go in the object file. */
24264 fixP->fx_done = 0;
24265 break;
24266
24267 case BFD_RELOC_UNUSED:
24268 default:
24269 as_bad_where (fixP->fx_file, fixP->fx_line,
24270 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24271 }
24272 }
24273
24274 /* Translate internal representation of relocation info to BFD target
24275 format. */
24276
24277 arelent *
24278 tc_gen_reloc (asection *section, fixS *fixp)
24279 {
24280 arelent * reloc;
24281 bfd_reloc_code_real_type code;
24282
24283 reloc = XNEW (arelent);
24284
24285 reloc->sym_ptr_ptr = XNEW (asymbol *);
24286 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
24287 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
24288
24289 if (fixp->fx_pcrel)
24290 {
24291 if (section->use_rela_p)
24292 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
24293 else
24294 fixp->fx_offset = reloc->address;
24295 }
24296 reloc->addend = fixp->fx_offset;
24297
24298 switch (fixp->fx_r_type)
24299 {
24300 case BFD_RELOC_8:
24301 if (fixp->fx_pcrel)
24302 {
24303 code = BFD_RELOC_8_PCREL;
24304 break;
24305 }
24306 /* Fall through. */
24307
24308 case BFD_RELOC_16:
24309 if (fixp->fx_pcrel)
24310 {
24311 code = BFD_RELOC_16_PCREL;
24312 break;
24313 }
24314 /* Fall through. */
24315
24316 case BFD_RELOC_32:
24317 if (fixp->fx_pcrel)
24318 {
24319 code = BFD_RELOC_32_PCREL;
24320 break;
24321 }
24322 /* Fall through. */
24323
24324 case BFD_RELOC_ARM_MOVW:
24325 if (fixp->fx_pcrel)
24326 {
24327 code = BFD_RELOC_ARM_MOVW_PCREL;
24328 break;
24329 }
24330 /* Fall through. */
24331
24332 case BFD_RELOC_ARM_MOVT:
24333 if (fixp->fx_pcrel)
24334 {
24335 code = BFD_RELOC_ARM_MOVT_PCREL;
24336 break;
24337 }
24338 /* Fall through. */
24339
24340 case BFD_RELOC_ARM_THUMB_MOVW:
24341 if (fixp->fx_pcrel)
24342 {
24343 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
24344 break;
24345 }
24346 /* Fall through. */
24347
24348 case BFD_RELOC_ARM_THUMB_MOVT:
24349 if (fixp->fx_pcrel)
24350 {
24351 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
24352 break;
24353 }
24354 /* Fall through. */
24355
24356 case BFD_RELOC_NONE:
24357 case BFD_RELOC_ARM_PCREL_BRANCH:
24358 case BFD_RELOC_ARM_PCREL_BLX:
24359 case BFD_RELOC_RVA:
24360 case BFD_RELOC_THUMB_PCREL_BRANCH7:
24361 case BFD_RELOC_THUMB_PCREL_BRANCH9:
24362 case BFD_RELOC_THUMB_PCREL_BRANCH12:
24363 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24364 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24365 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24366 case BFD_RELOC_VTABLE_ENTRY:
24367 case BFD_RELOC_VTABLE_INHERIT:
24368 #ifdef TE_PE
24369 case BFD_RELOC_32_SECREL:
24370 #endif
24371 code = fixp->fx_r_type;
24372 break;
24373
24374 case BFD_RELOC_THUMB_PCREL_BLX:
24375 #ifdef OBJ_ELF
24376 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24377 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
24378 else
24379 #endif
24380 code = BFD_RELOC_THUMB_PCREL_BLX;
24381 break;
24382
24383 case BFD_RELOC_ARM_LITERAL:
24384 case BFD_RELOC_ARM_HWLITERAL:
24385 	      /* If this is called then a literal has
24386 been referenced across a section boundary. */
24387 as_bad_where (fixp->fx_file, fixp->fx_line,
24388 _("literal referenced across section boundary"));
24389 return NULL;
24390
24391 #ifdef OBJ_ELF
24392 case BFD_RELOC_ARM_TLS_CALL:
24393 case BFD_RELOC_ARM_THM_TLS_CALL:
24394 case BFD_RELOC_ARM_TLS_DESCSEQ:
24395 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24396 case BFD_RELOC_ARM_GOT32:
24397 case BFD_RELOC_ARM_GOTOFF:
24398 case BFD_RELOC_ARM_GOT_PREL:
24399 case BFD_RELOC_ARM_PLT32:
24400 case BFD_RELOC_ARM_TARGET1:
24401 case BFD_RELOC_ARM_ROSEGREL32:
24402 case BFD_RELOC_ARM_SBREL32:
24403 case BFD_RELOC_ARM_PREL31:
24404 case BFD_RELOC_ARM_TARGET2:
24405 case BFD_RELOC_ARM_TLS_LDO32:
24406 case BFD_RELOC_ARM_PCREL_CALL:
24407 case BFD_RELOC_ARM_PCREL_JUMP:
24408 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24409 case BFD_RELOC_ARM_ALU_PC_G0:
24410 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24411 case BFD_RELOC_ARM_ALU_PC_G1:
24412 case BFD_RELOC_ARM_ALU_PC_G2:
24413 case BFD_RELOC_ARM_LDR_PC_G0:
24414 case BFD_RELOC_ARM_LDR_PC_G1:
24415 case BFD_RELOC_ARM_LDR_PC_G2:
24416 case BFD_RELOC_ARM_LDRS_PC_G0:
24417 case BFD_RELOC_ARM_LDRS_PC_G1:
24418 case BFD_RELOC_ARM_LDRS_PC_G2:
24419 case BFD_RELOC_ARM_LDC_PC_G0:
24420 case BFD_RELOC_ARM_LDC_PC_G1:
24421 case BFD_RELOC_ARM_LDC_PC_G2:
24422 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24423 case BFD_RELOC_ARM_ALU_SB_G0:
24424 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24425 case BFD_RELOC_ARM_ALU_SB_G1:
24426 case BFD_RELOC_ARM_ALU_SB_G2:
24427 case BFD_RELOC_ARM_LDR_SB_G0:
24428 case BFD_RELOC_ARM_LDR_SB_G1:
24429 case BFD_RELOC_ARM_LDR_SB_G2:
24430 case BFD_RELOC_ARM_LDRS_SB_G0:
24431 case BFD_RELOC_ARM_LDRS_SB_G1:
24432 case BFD_RELOC_ARM_LDRS_SB_G2:
24433 case BFD_RELOC_ARM_LDC_SB_G0:
24434 case BFD_RELOC_ARM_LDC_SB_G1:
24435 case BFD_RELOC_ARM_LDC_SB_G2:
24436 case BFD_RELOC_ARM_V4BX:
24437 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24438 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24439 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24440 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24441 code = fixp->fx_r_type;
24442 break;
24443
24444 case BFD_RELOC_ARM_TLS_GOTDESC:
24445 case BFD_RELOC_ARM_TLS_GD32:
24446 case BFD_RELOC_ARM_TLS_LE32:
24447 case BFD_RELOC_ARM_TLS_IE32:
24448 case BFD_RELOC_ARM_TLS_LDM32:
24449 /* BFD will include the symbol's address in the addend.
24450 But we don't want that, so subtract it out again here. */
24451 if (!S_IS_COMMON (fixp->fx_addsy))
24452 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
24453 code = fixp->fx_r_type;
24454 break;
24455 #endif
24456
24457 case BFD_RELOC_ARM_IMMEDIATE:
24458 as_bad_where (fixp->fx_file, fixp->fx_line,
24459 _("internal relocation (type: IMMEDIATE) not fixed up"));
24460 return NULL;
24461
24462 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24463 as_bad_where (fixp->fx_file, fixp->fx_line,
24464 _("ADRL used for a symbol not defined in the same file"));
24465 return NULL;
24466
24467 case BFD_RELOC_ARM_OFFSET_IMM:
24468 if (section->use_rela_p)
24469 {
24470 code = fixp->fx_r_type;
24471 break;
24472 }
24473
24474 if (fixp->fx_addsy != NULL
24475 && !S_IS_DEFINED (fixp->fx_addsy)
24476 && S_IS_LOCAL (fixp->fx_addsy))
24477 {
24478 as_bad_where (fixp->fx_file, fixp->fx_line,
24479 _("undefined local label `%s'"),
24480 S_GET_NAME (fixp->fx_addsy));
24481 return NULL;
24482 }
24483
24484 as_bad_where (fixp->fx_file, fixp->fx_line,
24485 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24486 return NULL;
24487
24488 default:
24489 {
24490 const char * type;
24491
24492 switch (fixp->fx_r_type)
24493 {
24494 case BFD_RELOC_NONE: type = "NONE"; break;
24495 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
24496 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
24497 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
24498 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
24499 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
24500 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
24501 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
24502 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
24503 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
24504 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
24505 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
24506 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
24507 default: type = _("<unknown>"); break;
24508 }
24509 as_bad_where (fixp->fx_file, fixp->fx_line,
24510 _("cannot represent %s relocation in this object file format"),
24511 type);
24512 return NULL;
24513 }
24514 }
24515
24516 #ifdef OBJ_ELF
24517 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
24518 && GOT_symbol
24519 && fixp->fx_addsy == GOT_symbol)
24520 {
24521 code = BFD_RELOC_ARM_GOTPC;
24522 reloc->addend = fixp->fx_offset = reloc->address;
24523 }
24524 #endif
24525
24526 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
24527
24528 if (reloc->howto == NULL)
24529 {
24530 as_bad_where (fixp->fx_file, fixp->fx_line,
24531 _("cannot represent %s relocation in this object file format"),
24532 bfd_get_reloc_code_name (code));
24533 return NULL;
24534 }
24535
24536 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24537 vtable entry to be used in the relocation's section offset. */
24538 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24539 reloc->address = fixp->fx_offset;
24540
24541 return reloc;
24542 }
24543
24544 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24545
24546 void
24547 cons_fix_new_arm (fragS * frag,
24548 int where,
24549 int size,
24550 expressionS * exp,
24551 bfd_reloc_code_real_type reloc)
24552 {
24553 int pcrel = 0;
24554
24555 /* Pick a reloc.
24556 FIXME: @@ Should look at CPU word size. */
24557 switch (size)
24558 {
24559 case 1:
24560 reloc = BFD_RELOC_8;
24561 break;
24562 case 2:
24563 reloc = BFD_RELOC_16;
24564 break;
24565 case 4:
24566 default:
24567 reloc = BFD_RELOC_32;
24568 break;
24569 case 8:
24570 reloc = BFD_RELOC_64;
24571 break;
24572 }
24573
24574 #ifdef TE_PE
24575 if (exp->X_op == O_secrel)
24576 {
24577 exp->X_op = O_symbol;
24578 reloc = BFD_RELOC_32_SECREL;
24579 }
24580 #endif
24581
24582 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24583 }
24584
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.
     Anything that is not such a branch is left untouched.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24602
24603
/* Return non-zero if the fix FIXP must be kept as a relocation for the
   linker rather than resolved by the assembler.  */

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* RVA relocations are always emitted for the PE image loader.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* ARM-style branch/call targeting a Thumb function: the linker
	 must see it so it can insert interworking glue.  */
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Thumb-style branch/call targeting an ARM function: likewise.  */
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.
     NOTE(review): the range test relies on the group relocation codes
     being contiguous in the bfd_reloc_code_real_type enum.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
24675
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.	 */

/* Return TRUE if fix FIXP may be adjusted to be relative to its section
   symbol, FALSE if the original symbol must be preserved.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    /* Consistency fix: was a bare "return 1" in a bfd_boolean function;
       use TRUE to match the other return statements.  */
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.	 */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.
     NOTE(review): assumes the group relocation codes are contiguous in
     the bfd_reloc_code_real_type enum.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24752
24753 #ifdef OBJ_ELF
24754 const char *
24755 elf32_arm_target_format (void)
24756 {
24757 #ifdef TE_SYMBIAN
24758 return (target_big_endian
24759 ? "elf32-bigarm-symbian"
24760 : "elf32-littlearm-symbian");
24761 #elif defined (TE_VXWORKS)
24762 return (target_big_endian
24763 ? "elf32-bigarm-vxworks"
24764 : "elf32-littlearm-vxworks");
24765 #elif defined (TE_NACL)
24766 return (target_big_endian
24767 ? "elf32-bigarm-nacl"
24768 : "elf32-littlearm-nacl");
24769 #else
24770 if (target_big_endian)
24771 return "elf32-bigarm";
24772 else
24773 return "elf32-littlearm";
24774 #endif
24775 }
24776
/* Symbol-frobbing hook for ARM ELF; simply defers to the generic ELF
   implementation.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24783 #endif
24784
24785 /* MD interface: Finalization. */
24786
24787 void
24788 arm_cleanup (void)
24789 {
24790 literal_pool * pool;
24791
24792 /* Ensure that all the IT blocks are properly closed. */
24793 check_it_blocks_finished ();
24794
24795 for (pool = list_of_pools; pool; pool = pool->next)
24796 {
24797 /* Put it at the end of the relevant section. */
24798 subseg_set (pool->section, pool->sub_section);
24799 #ifdef OBJ_ELF
24800 arm_elf_change_section ();
24801 #endif
24802 s_ltorg (0);
24803 }
24804 }
24805
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; scan
	 forward through empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
24872
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each COFF storage class to
	       its Thumb-flavoured counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24954
24955 /* MD interface: Initialization. */
24956
24957 static void
24958 set_constant_flonums (void)
24959 {
24960 int i;
24961
24962 for (i = 0; i < NUM_FLOAT_VALS; i++)
24963 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24964 abort ();
24965 }
24966
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  /* A CPU lacking the ARMv1 feature cannot execute ARM-state code, so
     the 16-bit (Thumb) instruction set is the only choice left.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
24976
/* MD interface hook: one-time assembler initialization.  Builds the
   lookup hash tables, resolves the CPU/FPU selection from the command
   line, records the object file flags and sets the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used for mnemonic, condition, shift, PSR,
     register, relocation and barrier-option lookup.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = march_cpu_opt;
      dyn_mcpu_ext_opt = dyn_march_ext_opt;
      /* Avoid double free in arm_md_end.  */
      dyn_march_ext_opt = NULL;
    }

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU selection when nothing above decided.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
  else
    selected_cpu = *mcpu_cpu_opt;
#else
  if (mcpu_cpu_opt && dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
  else if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
  if (dyn_mcpu_ext_opt)
    ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Order matters: the most specific
     (coprocessor) variants are tested before the plain architecture
     levels.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25214
25215 /* Command line processing. */
25216
25217 /* md_parse_option
25218 Invocation line includes a switch not recognized by the base assembler.
25219 See if it's a processor-specific option.
25220
25221 This routine is somewhat complicated by the need for backwards
25222 compatibility (since older releases of gcc can't be changed).
25223 The new options try to make the interface as compatible as
25224 possible with GCC.
25225
25226 New options (supported) are:
25227
25228 -mcpu=<cpu name> Assemble for selected processor
25229 -march=<architecture name> Assemble for selected architecture
25230 -mfpu=<fpu architecture> Assemble for selected FPU.
25231 -EB/-mbig-endian Big-endian
25232 -EL/-mlittle-endian Little-endian
25233 -k Generate PIC code
25234 -mthumb Start in Thumb mode
25235 -mthumb-interwork Code supports ARM/Thumb interworking
25236
25237 -m[no-]warn-deprecated Warn about deprecated features
25238 -m[no-]warn-syms Warn when symbols match instructions
25239
25240 For now we will also provide support for:
25241
25242 -mapcs-32 32-bit Program counter
25243 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
25245 -mapcs-reentrant Reentrant code
25246 -matpcs
25247 (sometime these will probably be replaced with -mapcs=<list of options>
25248 and -matpcs=<list of options>)
25249
   The remaining options are only supported for backwards compatibility.
25251 Cpu variants, the arm part is optional:
25252 -m[arm]1 Currently not supported.
25253 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25254 -m[arm]3 Arm 3 processor
25255 -m[arm]6[xx], Arm 6 processors
25256 -m[arm]7[xx][t][[d]m] Arm 7 processors
25257 -m[arm]8[10] Arm 8 processors
25258 -m[arm]9[20][tdmi] Arm 9 processors
25259 -mstrongarm[110[0]] StrongARM processors
25260 -mxscale XScale processors
25261 -m[arm]v[2345[t[e]]] Arm architectures
25262 -mall All (except the ARM1)
25263 FP variants:
25264 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25265 -mfpe-old (No float load/store multiples)
25266 -mvfpxd VFP Single precision
25267 -mvfp All VFP
25268 -mno-fpu Disable all floating point instructions
25269
25270 The following CPU names are recognized:
25271 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25272 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25273 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25274 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25275 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25276 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25277 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25278
25279 */
25280
/* Short options: -m<arg> selects CPU/architecture features, -k requests
   PIC code.  */
const char * md_shortopts = "m:k";

/* Define only the endianness options that make sense for the configured
   target: both for a bi-endian target, otherwise just the native one.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
25309
/* Entry describing a simple on/off command-line option: when OPTION is
   seen, *VAR is set to VALUE (VAR may be NULL for options that are
   accepted but ignored).  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25318
25319 struct arm_option_table arm_opts[] =
25320 {
25321 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25322 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25323 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25324 &support_interwork, 1, NULL},
25325 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25326 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25327 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25328 1, NULL},
25329 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25330 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25331 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25332 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25333 NULL},
25334
25335 /* These are recognized by the assembler, but have no affect on code. */
25336 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25337 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25338
25339 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25340 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25341 &warn_on_deprecated, 0, NULL},
25342 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25343 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25344 {NULL, NULL, NULL, 0, NULL}
25345 };
25346
/* Entry describing a legacy CPU-selection option (e.g. -marm7): sets
   *VAR to point at the feature set VALUE and carries a deprecation
   message suggesting the modern -mcpu= replacement.  */
struct arm_legacy_option_table
{
  const char *option;			/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
25354
/* Deprecated single-option spellings of CPU, architecture and FPU
   selections.  Matching one of these sets *VAR to VALUE and prints the
   DEPRECATED message recommending the modern replacement option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25467
/* Table entry describing a CPU accepted by -mcpu= (and the .cpu
   directive).  */
struct arm_cpu_option_table
{
  const char *name;		/* CPU name to match.  */
  size_t name_len;		/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Base architecture the CPU implements.  */
  const arm_feature_set	ext;	/* Architectural extensions the CPU adds.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25481
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Fields, in order: NAME, CANONICAL_NAME, base architecture, extension
   feature bits, and the FPU assumed when the user gives no -mfpu=.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		NULL,		 ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		NULL,		 ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		NULL,		 ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	NULL,		 ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		NULL,		 ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		NULL,		 ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	NULL,		 ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	NULL,		 ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	NULL,		 ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	"ARM920T",	 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	NULL,		 ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		NULL,		 ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	NULL,		 ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	"ARM926EJ-S",	 ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	"ARM926EJ-S",	 ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	NULL,		 ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	NULL,		 ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	"ARM946E-S",	 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	NULL,		 ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	"ARM966E-S",	 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	NULL,		 ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	NULL,		 ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	"ARM1020E",	 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	NULL,		 ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	"ARM1026EJ-S",	 ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	NULL,		 ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	NULL,		 ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	"ARM1136J-S",	 ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	NULL,		 ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	"ARM1136JF-S",	 ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	NULL,		 ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	"MPCore",	 ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	"MPCore",	 ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	NULL,		 ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	NULL,		 ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	NULL,		 ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	NULL,		 ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5",	"Cortex-A5",	 ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	"Cortex-A7",	 ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	"Cortex-A8",	 ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	"Cortex-A9",	 ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	"Cortex-A12",	 ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	"Cortex-A15",	 ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	"Cortex-A17",	 ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	"Cortex-A32",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	"Cortex-A35",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	"Cortex-A53",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a57",	"Cortex-A57",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	"Cortex-A72",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	"Cortex-A73",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-r4",	"Cortex-R4",	 ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	"Cortex-R4F",	 ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	"Cortex-R5",	 ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	"Cortex-R7",	 ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	"Cortex-R8",	 ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-m33",	"Cortex-M33",	 ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	"Cortex-M23",	 ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	"Cortex-M7",	 ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	"Cortex-M4",	 ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	"Cortex-M3",	 ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	"Cortex-M1",	 ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	"Cortex-M0",	 ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	"Cortex-M0+",	 ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	"Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	NULL,		 ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	NULL,		 ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	NULL,		 ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	NULL,		 ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312",	"ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	NULL,		 ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,		 ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1",	"APM X-Gene 1",	 ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	"APM X-Gene 2",	 ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25858
/* Table entry describing an architecture accepted by -march= (and the
   .arch directive).  */
struct arm_arch_option_table
{
  const char *name;		/* Architecture name to match.	*/
  size_t name_len;		/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Feature set the architecture provides.  */
  /* FPU assumed unless the user explicitly sets -mfpu=...  */
  const arm_feature_set	default_fpu;
};
25866
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Fields, in order: NAME, the feature set the architecture provides, and
   the FPU assumed when the user gives no -mfpu=.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.3-a",	ARM_ARCH_V8_3A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25929
/* ISA extensions in the co-processor and main instruction set space.  */
struct arm_option_extension_value_table
{
  const char *name;		/* Extension name, e.g. "crc".  */
  size_t name_len;		/* Length of NAME, precomputed for matching.  */
  const arm_feature_set merge_value;	/* Features added by "+<name>".  */
  const arm_feature_set clear_value;	/* Features removed by "+no<name>".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
25942
/* The following table must be in alphabetical order with a NULL last entry.
   */
/* ARM_EXT_OPT describes an extension available on a single architecture
   family; ARM_EXT_OPT2 lists two families on which it is available.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",	 FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
26011
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char *name;		/* FPU name accepted by -mfpu=.  */
  const arm_feature_set value;	/* Feature bits the FPU provides.  */
};
26018
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
/* Searched linearly; several legacy CPU-flavoured spellings map onto the
   same feature set as their modern equivalents.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
26069
/* Generic name -> integer mapping, used for simple enumerated options
   such as -mfloat-abi= and -meabi=.  */
struct arm_option_value_table
{
  const char *name;	/* Accepted option value.  */
  long value;		/* Corresponding internal constant.  */
};
26075
/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
26083
#ifdef OBJ_ELF
/* Values accepted by -meabi=.  */
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
26094
/* Table entry for options of the form -m<option>=<sub-option> whose
   sub-option is decoded by a helper function rather than a table lookup.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.	*/
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26102
/* Parse STR, a sequence of "+ext" / "+noext" architectural extension
   items, validating each against the base feature set *OPT_SET and
   accumulating the result in **EXT_SET_P (allocated here on first use).
   Returns FALSE after issuing a diagnostic on any error.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set **ext_set_p)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Lazily allocate the caller's extension set.  */
  if (!*ext_set_p)
    {
      *ext_set_p = XNEW (arm_feature_set);
      **ext_set_p = arm_arch_none;
    }

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every item must begin with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN covers just this item, up to the next '+' (if any).  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches (permanently) to removal mode.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;	/* Restart the alphabetical scan.  */
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  OPT is
	 not reset between items, which is what enforces alphabetical
	 ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (**ext_set_p, **ext_set_p,
				      opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (**ext_set_p, **ext_set_p, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
26241
26242 static bfd_boolean
26243 arm_parse_cpu (const char *str)
26244 {
26245 const struct arm_cpu_option_table *opt;
26246 const char *ext = strchr (str, '+');
26247 size_t len;
26248
26249 if (ext != NULL)
26250 len = ext - str;
26251 else
26252 len = strlen (str);
26253
26254 if (len == 0)
26255 {
26256 as_bad (_("missing cpu name `%s'"), str);
26257 return FALSE;
26258 }
26259
26260 for (opt = arm_cpus; opt->name != NULL; opt++)
26261 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26262 {
26263 mcpu_cpu_opt = &opt->value;
26264 if (!dyn_mcpu_ext_opt)
26265 dyn_mcpu_ext_opt = XNEW (arm_feature_set);
26266 *dyn_mcpu_ext_opt = opt->ext;
26267 mcpu_fpu_opt = &opt->default_fpu;
26268 if (opt->canonical_name)
26269 {
26270 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
26271 strcpy (selected_cpu_name, opt->canonical_name);
26272 }
26273 else
26274 {
26275 size_t i;
26276
26277 if (len >= sizeof selected_cpu_name)
26278 len = (sizeof selected_cpu_name) - 1;
26279
26280 for (i = 0; i < len; i++)
26281 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26282 selected_cpu_name[i] = 0;
26283 }
26284
26285 if (ext != NULL)
26286 return arm_parse_extension (ext, mcpu_cpu_opt, &dyn_mcpu_ext_opt);
26287
26288 return TRUE;
26289 }
26290
26291 as_bad (_("unknown cpu `%s'"), str);
26292 return FALSE;
26293 }
26294
26295 static bfd_boolean
26296 arm_parse_arch (const char *str)
26297 {
26298 const struct arm_arch_option_table *opt;
26299 const char *ext = strchr (str, '+');
26300 size_t len;
26301
26302 if (ext != NULL)
26303 len = ext - str;
26304 else
26305 len = strlen (str);
26306
26307 if (len == 0)
26308 {
26309 as_bad (_("missing architecture name `%s'"), str);
26310 return FALSE;
26311 }
26312
26313 for (opt = arm_archs; opt->name != NULL; opt++)
26314 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26315 {
26316 march_cpu_opt = &opt->value;
26317 march_fpu_opt = &opt->default_fpu;
26318 strcpy (selected_cpu_name, opt->name);
26319
26320 if (ext != NULL)
26321 return arm_parse_extension (ext, march_cpu_opt, &dyn_march_ext_opt);
26322
26323 return TRUE;
26324 }
26325
26326 as_bad (_("unknown architecture `%s'\n"), str);
26327 return FALSE;
26328 }
26329
26330 static bfd_boolean
26331 arm_parse_fpu (const char * str)
26332 {
26333 const struct arm_option_fpu_value_table * opt;
26334
26335 for (opt = arm_fpus; opt->name != NULL; opt++)
26336 if (streq (opt->name, str))
26337 {
26338 mfpu_opt = &opt->value;
26339 return TRUE;
26340 }
26341
26342 as_bad (_("unknown floating point format `%s'\n"), str);
26343 return FALSE;
26344 }
26345
26346 static bfd_boolean
26347 arm_parse_float_abi (const char * str)
26348 {
26349 const struct arm_option_value_table * opt;
26350
26351 for (opt = arm_float_abis; opt->name != NULL; opt++)
26352 if (streq (opt->name, str))
26353 {
26354 mfloat_abi_opt = opt->value;
26355 return TRUE;
26356 }
26357
26358 as_bad (_("unknown floating point abi `%s'\n"), str);
26359 return FALSE;
26360 }
26361
26362 #ifdef OBJ_ELF
26363 static bfd_boolean
26364 arm_parse_eabi (const char * str)
26365 {
26366 const struct arm_option_value_table *opt;
26367
26368 for (opt = arm_eabis; opt->name != NULL; opt++)
26369 if (streq (opt->name, str))
26370 {
26371 meabi_flags = opt->value;
26372 return TRUE;
26373 }
26374 as_bad (_("unknown EABI `%s'\n"), str);
26375 return FALSE;
26376 }
26377 #endif
26378
26379 static bfd_boolean
26380 arm_parse_it_mode (const char * str)
26381 {
26382 bfd_boolean ret = TRUE;
26383
26384 if (streq ("arm", str))
26385 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26386 else if (streq ("thumb", str))
26387 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26388 else if (streq ("always", str))
26389 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26390 else if (streq ("never", str))
26391 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26392 else
26393 {
26394 as_bad (_("unknown implicit IT mode `%s', should be "\
26395 "arm, thumb, always, or never."), str);
26396 ret = FALSE;
26397 }
26398
26399 return ret;
26400 }
26401
26402 static bfd_boolean
26403 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26404 {
26405 codecomposer_syntax = TRUE;
26406 arm_comment_chars[0] = ';';
26407 arm_line_separator_chars[0] = 0;
26408 return TRUE;
26409 }
26410
/* Long options handled by prefix match in md_parse_option; each entry's
   func receives the text after the '=' (or after the option name).  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
26431
/* Handle target-specific command-line option C with argument ARG.
   Returns 1 if the option was recognized and consumed, 0 otherwise so
   that the generic option machinery can report it.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Try the table of simple flag options first.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the legacy options (which store a pointer to their value).  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Finally the long options, matched as a prefix of ARG.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26522
26523 void
26524 md_show_usage (FILE * fp)
26525 {
26526 struct arm_option_table *opt;
26527 struct arm_long_option_table *lopt;
26528
26529 fprintf (fp, _(" ARM-specific assembler options:\n"));
26530
26531 for (opt = arm_opts; opt->option != NULL; opt++)
26532 if (opt->help != NULL)
26533 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26534
26535 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26536 if (lopt->help != NULL)
26537 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26538
26539 #ifdef OPTION_EB
26540 fprintf (fp, _("\
26541 -EB assemble code for a big-endian cpu\n"));
26542 #endif
26543
26544 #ifdef OPTION_EL
26545 fprintf (fp, _("\
26546 -EL assemble code for a little-endian cpu\n"));
26547 #endif
26548
26549 fprintf (fp, _("\
26550 --fix-v4bx Allow BX in ARMv4 code\n"));
26551 }
26552
26553
26554 #ifdef OBJ_ELF
/* Pairs an EABI Tag_CPU_arch value with the feature set that
   architecture provides.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {9, ARM_ARCH_V6K},
  {7, ARM_ARCH_V6Z},
  {11, ARM_ARCH_V6M},
  {12, ARM_ARCH_V6SM},
  {8, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7VE},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {14, ARM_ARCH_V8A},
  {16, ARM_ARCH_V8M_BASE},
  {17, ARM_ARCH_V8M_MAIN},
  {0, ARM_ARCH_NONE}		/* Terminator (val == 0).  */
};
26586
26587 /* Set an attribute if it has not already been set by the user. */
26588 static void
26589 aeabi_set_attribute_int (int tag, int value)
26590 {
26591 if (tag < 1
26592 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26593 || !attributes_set_explicitly[tag])
26594 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26595 }
26596
26597 static void
26598 aeabi_set_attribute_string (int tag, const char *value)
26599 {
26600 if (tag < 1
26601 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26602 || !attributes_set_explicitly[tag])
26603 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26604 }
26605
26606 /* Set the public EABI object attributes. */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* We need to make sure that the attributes do not identify us as v6S-M
	 when the only v6S-M feature in use is the Operating System
	 Extensions.  */
      if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
	if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
	  ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

      /* Code run during relaxation relies on selected_cpu being set.  */
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    flags = selected_cpu;
  /* The FPU feature bits are merged in whichever branch was taken.  */
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* ARCH becomes the value of the last table entry that contributed at
     least one feature bit not accounted for by an earlier entry.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* Features beyond the v8-M baseline promote the report to mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armv..." names are reported with the prefix dropped and the
	 remainder upper-cased (modifies selected_cpu_name in place).  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (dyn_mcpu_ext_opt && ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26843
/* Post relaxation hook.  Recompute ARM attributes now that relaxation is
   finished and free extension feature bits which will not be used anymore.  */
void
arm_md_post_relax (void)
{
  /* Must run before the XDELETEs below: attribute computation reads
     dyn_mcpu_ext_opt (e.g. for Tag_DSP_extension).  */
  aeabi_set_public_attributes ();
  XDELETE (dyn_mcpu_ext_opt);
  dyn_mcpu_ext_opt = NULL;
  XDELETE (dyn_march_ext_opt);
  dyn_march_ext_opt = NULL;
}
26855
26856 /* Add the default contents for the .ARM.attributes section. */
26857 void
26858 arm_md_end (void)
26859 {
26860 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26861 return;
26862
26863 aeabi_set_public_attributes ();
26864 }
26865 #endif /* OBJ_ELF */
26866
26867
26868 /* Parse a .cpu directive. */
26869
26870 static void
26871 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26872 {
26873 const struct arm_cpu_option_table *opt;
26874 char *name;
26875 char saved_char;
26876
26877 name = input_line_pointer;
26878 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26879 input_line_pointer++;
26880 saved_char = *input_line_pointer;
26881 *input_line_pointer = 0;
26882
26883 /* Skip the first "all" entry. */
26884 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26885 if (streq (opt->name, name))
26886 {
26887 mcpu_cpu_opt = &opt->value;
26888 if (!dyn_mcpu_ext_opt)
26889 dyn_mcpu_ext_opt = XNEW (arm_feature_set);
26890 *dyn_mcpu_ext_opt = opt->ext;
26891 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
26892 if (opt->canonical_name)
26893 strcpy (selected_cpu_name, opt->canonical_name);
26894 else
26895 {
26896 int i;
26897 for (i = 0; opt->name[i]; i++)
26898 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26899
26900 selected_cpu_name[i] = 0;
26901 }
26902 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26903 if (dyn_mcpu_ext_opt)
26904 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
26905 *input_line_pointer = saved_char;
26906 demand_empty_rest_of_line ();
26907 return;
26908 }
26909 as_bad (_("unknown cpu `%s'"), name);
26910 *input_line_pointer = saved_char;
26911 ignore_rest_of_line ();
26912 }
26913
26914
26915 /* Parse a .arch directive. */
26916
26917 static void
26918 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26919 {
26920 const struct arm_arch_option_table *opt;
26921 char saved_char;
26922 char *name;
26923
26924 name = input_line_pointer;
26925 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26926 input_line_pointer++;
26927 saved_char = *input_line_pointer;
26928 *input_line_pointer = 0;
26929
26930 /* Skip the first "all" entry. */
26931 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26932 if (streq (opt->name, name))
26933 {
26934 mcpu_cpu_opt = &opt->value;
26935 XDELETE (dyn_mcpu_ext_opt);
26936 dyn_mcpu_ext_opt = NULL;
26937 selected_cpu = *mcpu_cpu_opt;
26938 strcpy (selected_cpu_name, opt->name);
26939 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
26940 *input_line_pointer = saved_char;
26941 demand_empty_rest_of_line ();
26942 return;
26943 }
26944
26945 as_bad (_("unknown architecture `%s'\n"), name);
26946 *input_line_pointer = saved_char;
26947 ignore_rest_of_line ();
26948 }
26949
26950
26951 /* Parse a .object_arch directive. */
26952
26953 static void
26954 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26955 {
26956 const struct arm_arch_option_table *opt;
26957 char saved_char;
26958 char *name;
26959
26960 name = input_line_pointer;
26961 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26962 input_line_pointer++;
26963 saved_char = *input_line_pointer;
26964 *input_line_pointer = 0;
26965
26966 /* Skip the first "all" entry. */
26967 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26968 if (streq (opt->name, name))
26969 {
26970 object_arch = &opt->value;
26971 *input_line_pointer = saved_char;
26972 demand_empty_rest_of_line ();
26973 return;
26974 }
26975
26976 as_bad (_("unknown architecture `%s'\n"), name);
26977 *input_line_pointer = saved_char;
26978 ignore_rest_of_line ();
26979 }
26980
26981 /* Parse a .arch_extension directive. */
26982
26983 static void
26984 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26985 {
26986 const struct arm_option_extension_value_table *opt;
26987 const arm_feature_set arm_any = ARM_ANY;
26988 char saved_char;
26989 char *name;
26990 int adding_value = 1;
26991
26992 name = input_line_pointer;
26993 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26994 input_line_pointer++;
26995 saved_char = *input_line_pointer;
26996 *input_line_pointer = 0;
26997
26998 if (strlen (name) >= 2
26999 && strncmp (name, "no", 2) == 0)
27000 {
27001 adding_value = 0;
27002 name += 2;
27003 }
27004
27005 for (opt = arm_extensions; opt->name != NULL; opt++)
27006 if (streq (opt->name, name))
27007 {
27008 int i, nb_allowed_archs =
27009 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27010 for (i = 0; i < nb_allowed_archs; i++)
27011 {
27012 /* Empty entry. */
27013 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
27014 continue;
27015 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
27016 break;
27017 }
27018
27019 if (i == nb_allowed_archs)
27020 {
27021 as_bad (_("architectural extension `%s' is not allowed for the "
27022 "current base architecture"), name);
27023 break;
27024 }
27025
27026 if (!dyn_mcpu_ext_opt)
27027 {
27028 dyn_mcpu_ext_opt = XNEW (arm_feature_set);
27029 *dyn_mcpu_ext_opt = arm_arch_none;
27030 }
27031 if (adding_value)
27032 ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27033 opt->merge_value);
27034 else
27035 ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
27036 opt->clear_value);
27037
27038 ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
27039 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
27040 *input_line_pointer = saved_char;
27041 demand_empty_rest_of_line ();
27042 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27043 on this return so that duplicate extensions (extensions with the
27044 same name as a previous extension in the list) are not considered
27045 for command-line parsing. */
27046 return;
27047 }
27048
27049 if (opt->name == NULL)
27050 as_bad (_("unknown architecture extension `%s'\n"), name);
27051
27052 *input_line_pointer = saved_char;
27053 ignore_rest_of_line ();
27054 }
27055
27056 /* Parse a .fpu directive. */
27057
27058 static void
27059 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27060 {
27061 const struct arm_option_fpu_value_table *opt;
27062 char saved_char;
27063 char *name;
27064
27065 name = input_line_pointer;
27066 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27067 input_line_pointer++;
27068 saved_char = *input_line_pointer;
27069 *input_line_pointer = 0;
27070
27071 for (opt = arm_fpus; opt->name != NULL; opt++)
27072 if (streq (opt->name, name))
27073 {
27074 mfpu_opt = &opt->value;
27075 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
27076 if (dyn_mcpu_ext_opt)
27077 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
27078 *input_line_pointer = saved_char;
27079 demand_empty_rest_of_line ();
27080 return;
27081 }
27082
27083 as_bad (_("unknown floating point format `%s'\n"), name);
27084 *input_line_pointer = saved_char;
27085 ignore_rest_of_line ();
27086 }
27087
27088 /* Copy symbol information. */
27089
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Copy the ARM per-symbol flag bits from SRC to DEST — presumably the
     Thumb/ARM function marking; confirm ARM_GET_FLAG in tc-arm.h.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27095
27096 #ifdef OBJ_ELF
27097 /* Given a symbolic attribute NAME, return the proper integer value.
27098 Returns -1 if the attribute is not known. */
27099
27100 int
27101 arm_convert_symbolic_attribute (const char *name)
27102 {
27103 static const struct
27104 {
27105 const char * name;
27106 const int tag;
27107 }
27108 attribute_table[] =
27109 {
27110 /* When you modify this table you should
27111 also modify the list in doc/c-arm.texi. */
27112 #define T(tag) {#tag, tag}
27113 T (Tag_CPU_raw_name),
27114 T (Tag_CPU_name),
27115 T (Tag_CPU_arch),
27116 T (Tag_CPU_arch_profile),
27117 T (Tag_ARM_ISA_use),
27118 T (Tag_THUMB_ISA_use),
27119 T (Tag_FP_arch),
27120 T (Tag_VFP_arch),
27121 T (Tag_WMMX_arch),
27122 T (Tag_Advanced_SIMD_arch),
27123 T (Tag_PCS_config),
27124 T (Tag_ABI_PCS_R9_use),
27125 T (Tag_ABI_PCS_RW_data),
27126 T (Tag_ABI_PCS_RO_data),
27127 T (Tag_ABI_PCS_GOT_use),
27128 T (Tag_ABI_PCS_wchar_t),
27129 T (Tag_ABI_FP_rounding),
27130 T (Tag_ABI_FP_denormal),
27131 T (Tag_ABI_FP_exceptions),
27132 T (Tag_ABI_FP_user_exceptions),
27133 T (Tag_ABI_FP_number_model),
27134 T (Tag_ABI_align_needed),
27135 T (Tag_ABI_align8_needed),
27136 T (Tag_ABI_align_preserved),
27137 T (Tag_ABI_align8_preserved),
27138 T (Tag_ABI_enum_size),
27139 T (Tag_ABI_HardFP_use),
27140 T (Tag_ABI_VFP_args),
27141 T (Tag_ABI_WMMX_args),
27142 T (Tag_ABI_optimization_goals),
27143 T (Tag_ABI_FP_optimization_goals),
27144 T (Tag_compatibility),
27145 T (Tag_CPU_unaligned_access),
27146 T (Tag_FP_HP_extension),
27147 T (Tag_VFP_HP_extension),
27148 T (Tag_ABI_FP_16bit_format),
27149 T (Tag_MPextension_use),
27150 T (Tag_DIV_use),
27151 T (Tag_nodefaults),
27152 T (Tag_also_compatible_with),
27153 T (Tag_conformance),
27154 T (Tag_T2EE_use),
27155 T (Tag_Virtualization_use),
27156 T (Tag_DSP_extension),
27157 /* We deliberately do not include Tag_MPextension_use_legacy. */
27158 #undef T
27159 };
27160 unsigned int i;
27161
27162 if (name == NULL)
27163 return -1;
27164
27165 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
27166 if (streq (name, attribute_table[i].name))
27167 return attribute_table[i].tag;
27168
27169 return -1;
27170 }
27171
27172
27173 /* Apply sym value for relocations only in the case that they are for
27174 local symbols in the same segment as the fixup and you have the
27175 respective architectural feature for blx and simple switches. */
27176 int
27177 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27178 {
27179 if (fixP->fx_addsy
27180 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27181 /* PR 17444: If the local symbol is in a different section then a reloc
27182 will always be generated for it, so applying the symbol value now
27183 will result in a double offset being stored in the relocation. */
27184 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27185 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27186 {
27187 switch (fixP->fx_r_type)
27188 {
27189 case BFD_RELOC_ARM_PCREL_BLX:
27190 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27191 if (ARM_IS_FUNC (fixP->fx_addsy))
27192 return 1;
27193 break;
27194
27195 case BFD_RELOC_ARM_PCREL_CALL:
27196 case BFD_RELOC_THUMB_PCREL_BLX:
27197 if (THUMB_IS_FUNC (fixP->fx_addsy))
27198 return 1;
27199 break;
27200
27201 default:
27202 break;
27203 }
27204
27205 }
27206 return 0;
27207 }
27208 #endif /* OBJ_ELF */
This page took 0.85734 seconds and 5 git commands to generate.