Add support for ARM Cortex-M33 processor
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Hard failure: per the name, no alternative operand syntax should be
     attempted — confirm against call sites.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
88
/* Floating-point ABI variants (recorded in mfloat_abi_opt).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 #ifdef OBJ_ELF
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 #endif
167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168
169 #ifdef CPU_DEFAULT
170 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 #endif
172
173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v4t_5 =
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
191 static const arm_feature_set arm_ext_v6_notm =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
193 static const arm_feature_set arm_ext_v6_dsp =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
195 static const arm_feature_set arm_ext_barrier =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
197 static const arm_feature_set arm_ext_msr =
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
203 #ifdef OBJ_ELF
204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
205 #endif
206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
207 static const arm_feature_set arm_ext_m =
208 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
209 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
217 static const arm_feature_set arm_ext_v8m_main =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
222 static const arm_feature_set arm_ext_v6t2_v8m =
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics =
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
227 #ifdef OBJ_ELF
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp =
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
231 #endif
232 static const arm_feature_set arm_ext_ras =
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16 =
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
237
238 static const arm_feature_set arm_arch_any = ARM_ANY;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
242 #ifdef OBJ_ELF
243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
244 #endif
245
246 static const arm_feature_set arm_cext_iwmmxt2 =
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
248 static const arm_feature_set arm_cext_iwmmxt =
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
250 static const arm_feature_set arm_cext_xscale =
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
252 static const arm_feature_set arm_cext_maverick =
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
254 static const arm_feature_set fpu_fpa_ext_v1 =
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
256 static const arm_feature_set fpu_fpa_ext_v2 =
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
258 static const arm_feature_set fpu_vfp_ext_v1xd =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
260 static const arm_feature_set fpu_vfp_ext_v1 =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
262 static const arm_feature_set fpu_vfp_ext_v2 =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
264 static const arm_feature_set fpu_vfp_ext_v3xd =
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
266 static const arm_feature_set fpu_vfp_ext_v3 =
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
268 static const arm_feature_set fpu_vfp_ext_d32 =
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
270 static const arm_feature_set fpu_neon_ext_v1 =
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
274 #ifdef OBJ_ELF
275 static const arm_feature_set fpu_vfp_fp16 =
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
277 static const arm_feature_set fpu_neon_ext_fma =
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
279 #endif
280 static const arm_feature_set fpu_vfp_ext_fma =
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
282 static const arm_feature_set fpu_vfp_ext_armv8 =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
284 static const arm_feature_set fpu_vfp_ext_armv8xd =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
286 static const arm_feature_set fpu_neon_ext_armv8 =
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
288 static const arm_feature_set fpu_crypto_ext_armv8 =
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
290 static const arm_feature_set crc_ext_armv8 =
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
292 static const arm_feature_set fpu_neon_ext_v8_1 =
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
294
295 static int mfloat_abi_opt = -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name[20];
300
301 extern FLONUM_TYPE generic_floating_point_number;
302
303 /* Return if no cpu was selected on command-line. */
304 static bfd_boolean
305 no_cpu_selected (void)
306 {
307 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
308 }
309
310 #ifdef OBJ_ELF
311 # ifdef EABI_DEFAULT
312 static int meabi_flags = EABI_DEFAULT;
313 # else
314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
315 # endif
316
317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
318
319 bfd_boolean
320 arm_is_eabi (void)
321 {
322 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
323 }
324 #endif
325
326 #ifdef OBJ_ELF
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS * GOT_symbol;
329 #endif
330
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
334 instructions. */
335 static int thumb_mode = 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
340
341 /* Specifies the intrinsic IT insn behavior mode. */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER = 0x00,
  IMPLICIT_IT_MODE_ARM = 0x01,		/* Flag bit: implicit IT in ARM state.  */
  IMPLICIT_IT_MODE_THUMB = 0x02,	/* Flag bit: implicit IT in Thumb state.  */
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
350
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
353
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
358 there.)
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
361 machine code.
362
363 Important differences from the old Thumb mode:
364
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
373
374 static bfd_boolean unified_syntax = FALSE;
375
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars[] = "#[]{}";
381
/* Element type of a Neon operand type suffix (e.g. .s32, .f16).  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};
392
393 struct neon_type_el
394 {
395 enum neon_el_type type;
396 unsigned size;
397 };
398
399 #define NEON_MAX_TYPE_ELS 4
400
401 struct neon_type
402 {
403 struct neon_type_el el[NEON_MAX_TYPE_ELS];
404 unsigned elems;
405 };
406
/* Position of an instruction relative to an IT block.  */
enum it_instruction_type
{
  OUTSIDE_IT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
  NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
  IT_INSN			/* The IT insn has been parsed.  */
};
418
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
421
422 struct arm_it
423 {
424 const char * error;
425 unsigned long instruction;
426 int size;
427 int size_req;
428 int cond;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
431 appropriate. */
432 int uncond_value;
433 struct neon_type vectype;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
436 int is_neon;
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
439 unsigned long relax;
440 struct
441 {
442 bfd_reloc_code_real_type type;
443 expressionS exp;
444 int pc_rel;
445 } reloc;
446
447 enum it_instruction_type it_insn_type;
448
449 struct
450 {
451 unsigned reg;
452 signed int imm;
453 struct neon_type_el vectype;
454 unsigned present : 1; /* Operand present. */
455 unsigned isreg : 1; /* Operand was a register. */
456 unsigned immisreg : 1; /* .imm field is a second register. */
457 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle : 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc : 1; /* Operand has relocation suffix. */
467 unsigned writeback : 1; /* Operand has trailing ! */
468 unsigned preind : 1; /* Preindexed address. */
469 unsigned postind : 1; /* Postindexed address. */
470 unsigned negative : 1; /* Index register was negated. */
471 unsigned shifted : 1; /* Shift applied to operation. */
472 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
473 } operands[ARM_IT_MAX_OPERANDS];
474 };
475
476 static struct arm_it inst;
477
478 #define NUM_FLOAT_VALS 8
479
480 const char * fp_const[] =
481 {
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
483 };
484
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
487
488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
489
490 #define FAIL (-1)
491 #define SUCCESS (0)
492
493 #define SUFF_S 1
494 #define SUFF_D 2
495 #define SUFF_E 3
496 #define SUFF_P 4
497
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
500
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
503
504 #define DOUBLE_LOAD_FLAG 0x00000001
505
/* Condition-code mnemonic and its 4-bit encoding.  */
struct asm_cond
{
  const char * template_name;
  unsigned long value;
};
511
512 #define COND_ALWAYS 0xE
513
/* PSR field-specifier name and the mask bits it selects.  */
struct asm_psr
{
  const char * template_name;
  unsigned long field;
};
519
520 struct asm_barrier_opt
521 {
522 const char * template_name;
523 unsigned long value;
524 const arm_feature_set arch;
525 };
526
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
529
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
535
536 struct reloc_entry
537 {
538 const char * name;
539 bfd_reloc_code_real_type reloc;
540 };
541
/* Which VFP operand slot (Sd/Sm/Sn or Dd/Dm/Dn) a register encodes into.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};
547
/* Addressing variants of the VFP load/store-multiple instructions.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};
552
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
556
557 struct neon_typed_alias
558 {
559 unsigned char defined;
560 unsigned char index;
561 struct neon_type_el eltype;
562 };
563
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
enum arm_reg_type
{
  /* Keep this ordering in sync with reg_expected_msgs[], which is
     indexed by these values.  */
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB
};
591
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
/* Hash-table entry for one register or register alias.  */
struct reg_entry
{
  const char * name;
  unsigned int number;		/* Encoding number within its bank.  */
  unsigned char type;		/* An enum arm_reg_type value.  */
  unsigned char builtin;	/* Nonzero for predefined (non-alias) entries.  */
  struct neon_typed_alias * neon;
};
604
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs[] =
607 {
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
629 };
630
631 /* Some well known registers that we refer to directly elsewhere. */
632 #define REG_R12 12
633 #define REG_SP 13
634 #define REG_LR 14
635 #define REG_PC 15
636
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
638 take 2: */
639 #define INSN_SIZE 4
640
641 struct asm_opcode
642 {
643 /* Basic string to match. */
644 const char * template_name;
645
646 /* Parameters to instruction. */
647 unsigned int operands[8];
648
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag : 4;
651
652 /* Basic instruction code. */
653 unsigned int avalue : 28;
654
655 /* Thumb-format instruction code. */
656 unsigned int tvalue;
657
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set * avariant;
660 const arm_feature_set * tvariant;
661
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode) (void);
664
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode) (void);
667 };
668
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
679
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
684
685 #define T2_SUBS_PC_LR 0xf3de8f00
686
687 #define DATA_OP_SHIFT 21
688 #define SBIT_SHIFT 20
689
690 #define T2_OPCODE_MASK 0xfe1fffff
691 #define T2_DATA_OP_SHIFT 21
692 #define T2_SBIT_SHIFT 20
693
694 #define A_COND_MASK 0xf0000000
695 #define A_PUSH_POP_OP_MASK 0x0fff0000
696
697 /* Opcodes for pushing/poping registers to/from the stack. */
698 #define A1_OPCODE_PUSH 0x092d0000
699 #define A2_OPCODE_PUSH 0x052d0004
700 #define A2_OPCODE_POP 0x049d0004
701
702 /* Codes to distinguish the arithmetic instructions. */
703 #define OPCODE_AND 0
704 #define OPCODE_EOR 1
705 #define OPCODE_SUB 2
706 #define OPCODE_RSB 3
707 #define OPCODE_ADD 4
708 #define OPCODE_ADC 5
709 #define OPCODE_SBC 6
710 #define OPCODE_RSC 7
711 #define OPCODE_TST 8
712 #define OPCODE_TEQ 9
713 #define OPCODE_CMP 10
714 #define OPCODE_CMN 11
715 #define OPCODE_ORR 12
716 #define OPCODE_MOV 13
717 #define OPCODE_BIC 14
718 #define OPCODE_MVN 15
719
720 #define T2_OPCODE_AND 0
721 #define T2_OPCODE_BIC 1
722 #define T2_OPCODE_ORR 2
723 #define T2_OPCODE_ORN 3
724 #define T2_OPCODE_EOR 4
725 #define T2_OPCODE_ADD 8
726 #define T2_OPCODE_ADC 10
727 #define T2_OPCODE_SBC 11
728 #define T2_OPCODE_SUB 13
729 #define T2_OPCODE_RSB 14
730
731 #define T_OPCODE_MUL 0x4340
732 #define T_OPCODE_TST 0x4200
733 #define T_OPCODE_CMN 0x42c0
734 #define T_OPCODE_NEG 0x4240
735 #define T_OPCODE_MVN 0x43c0
736
737 #define T_OPCODE_ADD_R3 0x1800
738 #define T_OPCODE_SUB_R3 0x1a00
739 #define T_OPCODE_ADD_HI 0x4400
740 #define T_OPCODE_ADD_ST 0xb000
741 #define T_OPCODE_SUB_ST 0xb080
742 #define T_OPCODE_ADD_SP 0xa800
743 #define T_OPCODE_ADD_PC 0xa000
744 #define T_OPCODE_ADD_I8 0x3000
745 #define T_OPCODE_SUB_I8 0x3800
746 #define T_OPCODE_ADD_I3 0x1c00
747 #define T_OPCODE_SUB_I3 0x1e00
748
749 #define T_OPCODE_ASR_R 0x4100
750 #define T_OPCODE_LSL_R 0x4080
751 #define T_OPCODE_LSR_R 0x40c0
752 #define T_OPCODE_ROR_R 0x41c0
753 #define T_OPCODE_ASR_I 0x1000
754 #define T_OPCODE_LSL_I 0x0000
755 #define T_OPCODE_LSR_I 0x0800
756
757 #define T_OPCODE_MOV_I8 0x2000
758 #define T_OPCODE_CMP_I8 0x2800
759 #define T_OPCODE_CMP_LR 0x4280
760 #define T_OPCODE_MOV_HR 0x4600
761 #define T_OPCODE_CMP_HR 0x4500
762
763 #define T_OPCODE_LDR_PC 0x4800
764 #define T_OPCODE_LDR_SP 0x9800
765 #define T_OPCODE_STR_SP 0x9000
766 #define T_OPCODE_LDR_IW 0x6800
767 #define T_OPCODE_STR_IW 0x6000
768 #define T_OPCODE_LDR_IH 0x8800
769 #define T_OPCODE_STR_IH 0x8000
770 #define T_OPCODE_LDR_IB 0x7800
771 #define T_OPCODE_STR_IB 0x7000
772 #define T_OPCODE_LDR_RW 0x5800
773 #define T_OPCODE_STR_RW 0x5000
774 #define T_OPCODE_LDR_RH 0x5a00
775 #define T_OPCODE_STR_RH 0x5200
776 #define T_OPCODE_LDR_RB 0x5c00
777 #define T_OPCODE_STR_RB 0x5400
778
779 #define T_OPCODE_PUSH 0xb400
780 #define T_OPCODE_POP 0xbc00
781
782 #define T_OPCODE_BRANCH 0xe000
783
784 #define THUMB_SIZE 2 /* Size of thumb instruction. */
785 #define THUMB_PP_PC_LR 0x0100
786 #define THUMB_LOAD_BIT 0x0800
787 #define THUMB2_LOAD_BIT 0x00100000
788
789 #define BAD_ARGS _("bad arguments to instruction")
790 #define BAD_SP _("r13 not allowed here")
791 #define BAD_PC _("r15 not allowed here")
792 #define BAD_COND _("instruction cannot be conditional")
793 #define BAD_OVERLAP _("registers may not be the same")
794 #define BAD_HIREG _("lo register required")
795 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
796 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
797 #define BAD_BRANCH _("branch must be last instruction in IT block")
798 #define BAD_NOT_IT _("instruction not allowed in IT block")
799 #define BAD_FPU _("selected FPU does not support instruction")
800 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
801 #define BAD_IT_COND _("incorrect condition in IT block")
802 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
803 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
804 #define BAD_PC_ADDRESSING \
805 _("cannot use register index with PC-relative addressing")
806 #define BAD_PC_WRITEBACK \
807 _("cannot use writeback with PC-relative addressing")
808 #define BAD_RANGE _("branch out of range")
809 #define BAD_FP16 _("selected processor does not support fp16 instruction")
810 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
811 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
812
813 static struct hash_control * arm_ops_hsh;
814 static struct hash_control * arm_cond_hsh;
815 static struct hash_control * arm_shift_hsh;
816 static struct hash_control * arm_psr_hsh;
817 static struct hash_control * arm_v7m_psr_hsh;
818 static struct hash_control * arm_reg_hsh;
819 static struct hash_control * arm_reloc_hsh;
820 static struct hash_control * arm_barrier_opt_hsh;
821
822 /* Stuff needed to resolve the label ambiguity
823 As:
824 ...
825 label: <insn>
826 may differ from:
827 ...
828 label:
829 <insn> */
830
831 symbolS * last_label_seen;
832 static int label_is_thumb_function_name = FALSE;
833
834 /* Literal pool structure. Held on a per-section
835 and per-sub-section basis. */
836
837 #define MAX_LITERAL_POOL_SIZE 1024
838 typedef struct literal_pool
839 {
840 expressionS literals [MAX_LITERAL_POOL_SIZE];
841 unsigned int next_free_entry;
842 unsigned int id;
843 symbolS * symbol;
844 segT section;
845 subsegT sub_section;
846 #ifdef OBJ_ELF
847 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
848 #endif
849 struct literal_pool * next;
850 unsigned int alignment;
851 } literal_pool;
852
853 /* Pointer to a linked list of literal pools. */
854 literal_pool * list_of_pools = NULL;
855
/* State machine for the .asmfunc/.endasmfunc directives.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;
862
863 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
864
865 #ifdef OBJ_ELF
866 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
867 #else
868 static struct current_it now_it;
869 #endif
870
871 static inline int
872 now_it_compatible (int cond)
873 {
874 return (cond & ~1) == (now_it.cc & ~1);
875 }
876
877 static inline int
878 conditional_insn (void)
879 {
880 return inst.cond != COND_ALWAYS;
881 }
882
883 static int in_it_block (void);
884
885 static int handle_it_state (void);
886
887 static void force_automatic_it_block_close (void);
888
889 static void it_fsm_post_encode (void);
890
891 #define set_it_insn_type(type) \
892 do \
893 { \
894 inst.it_insn_type = type; \
895 if (handle_it_state () == FAIL) \
896 return; \
897 } \
898 while (0)
899
900 #define set_it_insn_type_nonvoid(type, failret) \
901 do \
902 { \
903 inst.it_insn_type = type; \
904 if (handle_it_state () == FAIL) \
905 return failret; \
906 } \
907 while(0)
908
909 #define set_it_insn_type_last() \
910 do \
911 { \
912 if (inst.cond == COND_ALWAYS) \
913 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
914 else \
915 set_it_insn_type (INSIDE_IT_LAST_INSN); \
916 } \
917 while (0)
918
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Characters that separate statements on one source line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most one space character at *STR.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
953
954 static inline int
955 skip_past_char (char ** str, char c)
956 {
957 /* PR gas/14987: Allow for whitespace before the expected character. */
958 skip_whitespace (*str);
959
960 if (**str == c)
961 {
962 (*str)++;
963 return SUCCESS;
964 }
965 else
966 return FAIL;
967 }
968
969 #define skip_past_comma(str) skip_past_char (str, ',')
970
971 /* Arithmetic expressions (possibly involving symbols). */
972
973 /* Return TRUE if anything in the expression is a bignum. */
974
975 static int
976 walk_no_bignums (symbolS * sp)
977 {
978 if (symbol_get_value_expression (sp)->X_op == O_big)
979 return 1;
980
981 if (symbol_get_value_expression (sp)->X_add_symbol)
982 {
983 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
984 || (symbol_get_value_expression (sp)->X_op_symbol
985 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
986 }
987
988 return 0;
989 }
990
/* Non-zero while my_get_expression is running; md_operand uses this
   flag to mark operands it cannot parse as O_illegal.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1000
/* Parse an arithmetic expression at *STR into EP, honouring the
   immediate-prefix convention selected by PREFIX_MODE (one of the GE_*
   values above).  On success, advance *STR past the expression and
   return 0; on failure return non-zero, with inst.error possibly set
   and *STR advanced to the point of failure.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  /* NOTE(review): this path returns FAIL while the others below
	     return 1; callers appear to test only for non-zero, so both
	     behave as failure -- confirm before unifying.  */
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser works on input_line_pointer; save it,
     point it at our string, and restore it on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;	/* Lets md_operand flag bad operands.  */
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1086
1087 /* Turn a string in input_line_pointer into a floating point constant
1088 of type TYPE, and store the appropriate bytes in *LITP. The number
1089 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1090 returned, or NULL on OK.
1091
1092 Note that fp constants aren't represent in the normal way on the ARM.
1093 In big endian mode, things are as expected. However, in little endian
1094 mode fp constants are big-endian word-wise, and little-endian byte-wise
1095 within the words. For example, (double) 1.1 in big endian mode is
1096 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1097 the byte sequence 99 99 f1 3f 9a 99 99 99.
1098
1099 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1100
1101 const char *
1102 md_atof (int type, char * litP, int * sizeP)
1103 {
1104 int prec;
1105 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1106 char *t;
1107 int i;
1108
1109 switch (type)
1110 {
1111 case 'f':
1112 case 'F':
1113 case 's':
1114 case 'S':
1115 prec = 2;
1116 break;
1117
1118 case 'd':
1119 case 'D':
1120 case 'r':
1121 case 'R':
1122 prec = 4;
1123 break;
1124
1125 case 'x':
1126 case 'X':
1127 prec = 5;
1128 break;
1129
1130 case 'p':
1131 case 'P':
1132 prec = 5;
1133 break;
1134
1135 default:
1136 *sizeP = 0;
1137 return _("Unrecognized or unsupported floating point constant");
1138 }
1139
1140 t = atof_ieee (input_line_pointer, type, words);
1141 if (t)
1142 input_line_pointer = t;
1143 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1144
1145 if (target_big_endian)
1146 {
1147 for (i = 0; i < prec; i++)
1148 {
1149 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1150 litP += sizeof (LITTLENUM_TYPE);
1151 }
1152 }
1153 else
1154 {
1155 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1156 for (i = prec - 1; i >= 0; i--)
1157 {
1158 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1159 litP += sizeof (LITTLENUM_TYPE);
1160 }
1161 else
1162 /* For a 4 byte float the order of elements in `words' is 1 0.
1163 For an 8 byte float the order is 1 0 3 2. */
1164 for (i = 0; i < prec; i += 2)
1165 {
1166 md_number_to_chars (litP, (valueT) words[i + 1],
1167 sizeof (LITTLENUM_TYPE));
1168 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1169 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1170 litP += 2 * sizeof (LITTLENUM_TYPE);
1171 }
1172 }
1173
1174 return NULL;
1175 }
1176
1177 /* We handle all bad expressions here, so that we can report the faulty
1178 instruction in the error message. */
1179 void
1180 md_operand (expressionS * exp)
1181 {
1182 if (in_my_get_expression)
1183 exp->X_op = O_illegal;
1184 }
1185
1186 /* Immediate values. */
1187
1188 /* Generic immediate-value read function for use in directives.
1189 Accepts anything that 'expression' can fold to a constant.
1190 *val receives the number. */
1191 #ifdef OBJ_ELF
1192 static int
1193 immediate_for_directive (int *val)
1194 {
1195 expressionS exp;
1196 exp.X_op = O_illegal;
1197
1198 if (is_immediate_prefix (*input_line_pointer))
1199 {
1200 input_line_pointer++;
1201 expression (&exp);
1202 }
1203
1204 if (exp.X_op != O_constant)
1205 {
1206 as_bad (_("expected #constant"));
1207 ignore_rest_of_line ();
1208 return FAIL;
1209 }
1210 *val = exp.X_add_number;
1211 return SUCCESS;
1212 }
1213 #endif
1214
1215 /* Register parsing. */
1216
1217 /* Generic register parser. CCP points to what should be the
1218 beginning of a register name. If it is indeed a valid register
1219 name, advance CCP over it and return the reg_entry structure;
1220 otherwise return NULL. Does not issue diagnostics. */
1221
1222 static struct reg_entry *
1223 arm_reg_parse_multi (char **ccp)
1224 {
1225 char *start = *ccp;
1226 char *p;
1227 struct reg_entry *reg;
1228
1229 skip_whitespace (start);
1230
1231 #ifdef REGISTER_PREFIX
1232 if (*start != REGISTER_PREFIX)
1233 return NULL;
1234 start++;
1235 #endif
1236 #ifdef OPTIONAL_REGISTER_PREFIX
1237 if (*start == OPTIONAL_REGISTER_PREFIX)
1238 start++;
1239 #endif
1240
1241 p = start;
1242 if (!ISALPHA (*p) || !is_name_beginner (*p))
1243 return NULL;
1244
1245 do
1246 p++;
1247 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1248
1249 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1250
1251 if (!reg)
1252 return NULL;
1253
1254 *ccp = p;
1255 return reg;
1256 }
1257
1258 static int
1259 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1260 enum arm_reg_type type)
1261 {
1262 /* Alternative syntaxes are accepted for a few register classes. */
1263 switch (type)
1264 {
1265 case REG_TYPE_MVF:
1266 case REG_TYPE_MVD:
1267 case REG_TYPE_MVFX:
1268 case REG_TYPE_MVDX:
1269 /* Generic coprocessor register names are allowed for these. */
1270 if (reg && reg->type == REG_TYPE_CN)
1271 return reg->number;
1272 break;
1273
1274 case REG_TYPE_CP:
1275 /* For backward compatibility, a bare number is valid here. */
1276 {
1277 unsigned long processor = strtoul (start, ccp, 10);
1278 if (*ccp != start && processor <= 15)
1279 return processor;
1280 }
1281 /* Fall through. */
1282
1283 case REG_TYPE_MMXWC:
1284 /* WC includes WCG. ??? I'm not sure this is true for all
1285 instructions that take WC registers. */
1286 if (reg && reg->type == REG_TYPE_MMXWCG)
1287 return reg->number;
1288 break;
1289
1290 default:
1291 break;
1292 }
1293
1294 return FAIL;
1295 }
1296
1297 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1298 return value is the register number or FAIL. */
1299
1300 static int
1301 arm_reg_parse (char **ccp, enum arm_reg_type type)
1302 {
1303 char *start = *ccp;
1304 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1305 int ret;
1306
1307 /* Do not allow a scalar (reg+index) to parse as a register. */
1308 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1309 return FAIL;
1310
1311 if (reg && reg->type == type)
1312 return reg->number;
1313
1314 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1315 return ret;
1316
1317 *ccp = start;
1318 return FAIL;
1319 }
1320
1321 /* Parse a Neon type specifier. *STR should point at the leading '.'
1322 character. Does no verification at this stage that the type fits the opcode
1323 properly. E.g.,
1324
1325 .i32.i32.s16
1326 .s32.f32
1327 .u16
1328
1329 Can all be legally parsed by this function.
1330
1331 Fills in neon_type struct pointer with parsed information, and updates STR
1332 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1333 type, FAIL if not. */
1334
1335 static int
1336 parse_neon_type (struct neon_type *type, char **str)
1337 {
1338 char *ptr = *str;
1339
1340 if (type)
1341 type->elems = 0;
1342
1343 while (type->elems < NEON_MAX_TYPE_ELS)
1344 {
1345 enum neon_el_type thistype = NT_untyped;
1346 unsigned thissize = -1u;
1347
1348 if (*ptr != '.')
1349 break;
1350
1351 ptr++;
1352
1353 /* Just a size without an explicit type. */
1354 if (ISDIGIT (*ptr))
1355 goto parsesize;
1356
1357 switch (TOLOWER (*ptr))
1358 {
1359 case 'i': thistype = NT_integer; break;
1360 case 'f': thistype = NT_float; break;
1361 case 'p': thistype = NT_poly; break;
1362 case 's': thistype = NT_signed; break;
1363 case 'u': thistype = NT_unsigned; break;
1364 case 'd':
1365 thistype = NT_float;
1366 thissize = 64;
1367 ptr++;
1368 goto done;
1369 default:
1370 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1371 return FAIL;
1372 }
1373
1374 ptr++;
1375
1376 /* .f is an abbreviation for .f32. */
1377 if (thistype == NT_float && !ISDIGIT (*ptr))
1378 thissize = 32;
1379 else
1380 {
1381 parsesize:
1382 thissize = strtoul (ptr, &ptr, 10);
1383
1384 if (thissize != 8 && thissize != 16 && thissize != 32
1385 && thissize != 64)
1386 {
1387 as_bad (_("bad size %d in type specifier"), thissize);
1388 return FAIL;
1389 }
1390 }
1391
1392 done:
1393 if (type)
1394 {
1395 type->el[type->elems].type = thistype;
1396 type->el[type->elems].size = thissize;
1397 type->elems++;
1398 }
1399 }
1400
1401 /* Empty/missing type is not a successful parse. */
1402 if (type->elems == 0)
1403 return FAIL;
1404
1405 *str = ptr;
1406
1407 return SUCCESS;
1408 }
1409
1410 /* Errors may be set multiple times during parsing or bit encoding
1411 (particularly in the Neon bits), but usually the earliest error which is set
1412 will be the most meaningful. Avoid overwriting it with later (cascading)
1413 errors by calling this function. */
1414
1415 static void
1416 first_error (const char *err)
1417 {
1418 if (!inst.error)
1419 inst.error = err;
1420 }
1421
1422 /* Parse a single type, e.g. ".s32", leading period included. */
1423 static int
1424 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1425 {
1426 char *str = *ccp;
1427 struct neon_type optype;
1428
1429 if (*str == '.')
1430 {
1431 if (parse_neon_type (&optype, &str) == SUCCESS)
1432 {
1433 if (optype.elems == 1)
1434 *vectype = optype.el[0];
1435 else
1436 {
1437 first_error (_("only one type should be specified for operand"));
1438 return FAIL;
1439 }
1440 }
1441 else
1442 {
1443 first_error (_("vector type expected"));
1444 return FAIL;
1445 }
1446 }
1447 else
1448 return FAIL;
1449
1450 *ccp = str;
1451
1452 return SUCCESS;
1453 }
1454
1455 /* Special meanings for indices (which have a range of 0-7), which will fit into
1456 a 4-bit integer. */
1457
1458 #define NEON_ALL_LANES 15
1459 #define NEON_INTERLEAVE_LANES 14
1460
1461 /* Parse either a register or a scalar, with an optional type. Return the
1462 register number, and optionally fill in the actual type of the register
1463 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1464 type/index information in *TYPEINFO. */
1465
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with a wholly-undefined type/index pair.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to a register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may not conflict with a type already
     attached to the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" (scalar) or "[]" (all lanes) suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1569
1570 /* Like arm_reg_parse, but allow allow the following extra features:
1571 - If RTYPE is non-zero, return the (possibly restricted) type of the
1572 register (e.g. Neon double or quad reg when either has been requested).
1573 - If this is a Neon vector type with additional type information, fill
1574 in the struct pointed to by VECTYPE (if non-NULL).
1575 This function will fault on encountering a scalar. */
1576
1577 static int
1578 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1579 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1580 {
1581 struct neon_typed_alias atype;
1582 char *str = *ccp;
1583 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1584
1585 if (reg == FAIL)
1586 return FAIL;
1587
1588 /* Do not allow regname(... to parse as a register. */
1589 if (*str == '(')
1590 return FAIL;
1591
1592 /* Do not allow a scalar (reg+index) to parse as a register. */
1593 if ((atype.defined & NTA_HASINDEX) != 0)
1594 {
1595 first_error (_("register operand expected, but got scalar"));
1596 return FAIL;
1597 }
1598
1599 if (vectype)
1600 *vectype = atype.eltype;
1601
1602 *ccp = str;
1603
1604 return reg;
1605 }
1606
1607 #define NEON_SCALAR_REG(X) ((X) >> 4)
1608 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1609
1610 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1611 have enough information to be able to do a good job bounds-checking. So, we
1612 just do easy checks here, and do further checks later. */
1613
1614 static int
1615 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1616 {
1617 int reg;
1618 char *str = *ccp;
1619 struct neon_typed_alias atype;
1620
1621 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1622
1623 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1624 return FAIL;
1625
1626 if (atype.index == NEON_ALL_LANES)
1627 {
1628 first_error (_("scalar must have an index"));
1629 return FAIL;
1630 }
1631 else if (atype.index >= 64 / elsize)
1632 {
1633 first_error (_("scalar index out of range"));
1634 return FAIL;
1635 }
1636
1637 if (type)
1638 *type = atype.eltype;
1639
1640 *ccp = str;
1641
1642 return reg * 16 + atype.index;
1643 }
1644
1645 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1646
static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long range = 0;	/* Accumulated 16-bit register mask.  */
  int another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;	/* Non-zero after "rN-" has been seen.  */
	  int cur_reg = -1;	/* Last register parsed.  */

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      /* Close out a pending rN-rM range by adding the
		 intermediate registers to the mask.  */
	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* Continue on ',', or on '-' (which flags a pending range and
	     advances STR).  The str-- below undoes the increment made
	     when the '-' test fails and the loop exits.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      /* A bare constant is accepted as a 16-bit register mask.  */
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  /* NOTE(review): regno at this point is derived from a
		     power-of-two mask used as a shift count, not a
		     register number; the r%d printed may be odd.
		     Confirm the intended value here.  */
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      /* A symbolic mask is deferred to a relocation; only one
		 such expression can be represented.  */
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1772
/* Types of registers in a list (see parse_vfp_reg_list).  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Parsed as REG_TYPE_VFS registers.  */
  REGLIST_VFP_D,	/* Parsed as REG_TYPE_VFD registers.  */
  REGLIST_NEON_D	/* Parsed as REG_TYPE_NDQ, with Neon extensions.  */
};
1781
1782 /* Parse a VFP register list. If the string is invalid return FAIL.
1783 Otherwise return the number of registers, and set PBASE to the first
1784 register. Parses registers of type ETYPE.
1785 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1786 - Q registers can be used to specify pairs of D registers
1787 - { } can be omitted from around a singleton register list
1788 FIXME: This is not implemented, as it would require backtracking in
1789 some cases, e.g.:
1790 vtbl.8 d3,d4,d5
1791 This could be done (the meaning isn't really ambiguous), but doesn't
1792 fit in well with the current parsing framework.
1793 - 32 D registers may be used (also true for VFPv3).
1794 FIXME: Types are ignored in these register lists, which is probably a
1795 bug. */
1796
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;	/* Number of D-sized registers in the list.  */
  int warned = 0;
  unsigned long mask = 0;	/* One bit per register seen.  */
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class (and, for S registers, the limit).  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Merge the D32 feature into the set of features used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above the valid range; lowered to the lowest register seen.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;	/* Q<n> occupies two consecutive mask bits.  */
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this skips what is assumed to be the closing '}'
     without checking it -- confirm a missing '}' is diagnosed by the
     callers or by later parsing.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1960
1961 /* True if two alias types are the same. */
1962
1963 static bfd_boolean
1964 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1965 {
1966 if (!a && !b)
1967 return TRUE;
1968
1969 if (!a || !b)
1970 return FALSE;
1971
1972 if (a->defined != b->defined)
1973 return FALSE;
1974
1975 if ((a->defined & NTA_HASTYPE) != 0
1976 && (a->eltype.type != b->eltype.type
1977 || a->eltype.size != b->eltype.size))
1978 return FALSE;
1979
1980 if ((a->defined & NTA_HASINDEX) != 0
1981 && (a->index != b->index))
1982 return FALSE;
1983
1984 return TRUE;
1985 }
1986
1987 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1988 The base register is put in *PBASE.
1989 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1990 the return value.
1991 The register stride (minus one) is put in bit 4 of the return value.
1992 Bits [6:5] encode the list length (minus one).
1993 The type of the list elements is put in *ELTYPE, if non-NULL. */
1994
1995 #define NEON_LANE(X) ((X) & 0xf)
1996 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1997 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1998
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list.  */
  int reg_incr = -1;	/* Stride between registers; -1 = not yet known.  */
  int count = 0;	/* Number of D-sized registers seen.  */
  int lane = -1;	/* Common lane, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces are optional around a single-register list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register fixes the base; Q registers force stride 1.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every entry must carry the same type/index information.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* A Q register counts as two D registers (DREGS).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the function.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2155
2156 /* Parse an explicit relocation suffix on an expression. This is
2157 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2158 arm_reloc_hsh contains no entries, so this function can only
2159 succeed if there is no () after the word. Returns -1 on error,
2160 BFD_RELOC_UNUSED if there wasn't any suffix. */
2161
2162 static int
2163 parse_reloc (char **str)
2164 {
2165 struct reloc_entry *r;
2166 char *p, *q;
2167
2168 if (**str != '(')
2169 return BFD_RELOC_UNUSED;
2170
2171 p = *str + 1;
2172 q = p;
2173
2174 while (*q && *q != ')' && *q != ',')
2175 q++;
2176 if (*q != ')')
2177 return -1;
2178
2179 if ((r = (struct reloc_entry *)
2180 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2181 return -1;
2182
2183 *str = q + 1;
2184 return r->reloc;
2185 }
2186
2187 /* Directives: register aliases. */
2188
2189 static struct reg_entry *
2190 insert_reg_alias (char *str, unsigned number, int type)
2191 {
2192 struct reg_entry *new_reg;
2193 const char *name;
2194
2195 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2196 {
2197 if (new_reg->builtin)
2198 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2199
2200 /* Only warn about a redefinition if it's not defined as the
2201 same register. */
2202 else if (new_reg->number != number || new_reg->type != type)
2203 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2204
2205 return NULL;
2206 }
2207
2208 name = xstrdup (str);
2209 new_reg = XNEW (struct reg_entry);
2210
2211 new_reg->name = name;
2212 new_reg->number = number;
2213 new_reg->type = type;
2214 new_reg->builtin = FALSE;
2215 new_reg->neon = NULL;
2216
2217 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2218 abort ();
2219
2220 return new_reg;
2221 }
2222
2223 static void
2224 insert_neon_reg_alias (char *str, int number, int type,
2225 struct neon_typed_alias *atype)
2226 {
2227 struct reg_entry *reg = insert_reg_alias (str, number, type);
2228
2229 if (!reg)
2230 {
2231 first_error (_("attempt to redefine typed alias"));
2232 return;
2233 }
2234
2235 if (atype)
2236 {
2237 reg->neon = XNEW (struct neon_typed_alias);
2238 *reg->neon = *atype;
2239 }
2240 }
2241
2242 /* Look for the .req directive. This is of the form:
2243
2244 new_register_name .req existing_register_name
2245
2246 If we find one, or if it looks sufficiently like one that we want to
2247 handle any error here, return TRUE. Otherwise return FALSE. */
2248
/* Handle a ".req" directive: NEWNAME .req <existing register>.
   NEWNAME is the proposed alias; P points just past it on the input
   line.  Returns FALSE if the line is not a .req at all (so the caller
   can try other interpretations), TRUE if it was handled here --
   including handled-with-error cases.  */

static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must already be a known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Upper-case variant first; skip it if it is spelled the same as
	 the name as given.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Lower-case variant, again only when it differs from the name
	 as given.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2321
2322 /* Create a Neon typed/indexed register alias using directives, e.g.:
2323 X .dn d5.s32[1]
2324 Y .qn 6.s16
2325 Z .dn d7
2326 T .dn Z[0]
2327 These typed registers can be used instead of the types specified after the
2328 Neon mnemonic, so long as all operands given have types. Types can also be
2329 specified directly, e.g.:
2330 vadd d0.s32, d1.s32, d2.s32 */
2331
/* Handle the ".dn" and ".qn" directives that create a typed/indexed
   Neon register alias (see the comment block above for examples).
   NEWNAME is the proposed alias name; P points just past it on the
   input line.  Returns FALSE when the line is not a .dn/.qn directive
   at all; TRUE when it was consumed here (successfully or with a
   diagnostic already issued).  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* ".dn" aliases a D (double) register, ".qn" a Q (quad) register.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry for the bare number.  A Q register
	 number is doubled: internally Q registers are numbered in
	 D-register units.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type/index info.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2470
2471 /* Should never be called, as .req goes between the alias and the
2472 register name, not at the beginning of the line. */
2473
2474 static void
2475 s_req (int a ATTRIBUTE_UNUSED)
2476 {
2477 as_bad (_("invalid syntax for .req directive"));
2478 }
2479
2480 static void
2481 s_dn (int a ATTRIBUTE_UNUSED)
2482 {
2483 as_bad (_("invalid syntax for .dn directive"));
2484 }
2485
2486 static void
2487 s_qn (int a ATTRIBUTE_UNUSED)
2488 {
2489 as_bad (_("invalid syntax for .qn directive"));
2490 }
2491
2492 /* The .unreq directive deletes an alias which was previously defined
2493 by .req. For example:
2494
2495 my_alias .req r11
2496 .unreq my_alias */
2497
2498 static void
2499 s_unreq (int a ATTRIBUTE_UNUSED)
2500 {
2501 char * name;
2502 char saved_char;
2503
2504 name = input_line_pointer;
2505
2506 while (*input_line_pointer != 0
2507 && *input_line_pointer != ' '
2508 && *input_line_pointer != '\n')
2509 ++input_line_pointer;
2510
2511 saved_char = *input_line_pointer;
2512 *input_line_pointer = 0;
2513
2514 if (!*name)
2515 as_bad (_("invalid syntax for .unreq directive"));
2516 else
2517 {
2518 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2519 name);
2520
2521 if (!reg)
2522 as_bad (_("unknown register alias '%s'"), name);
2523 else if (reg->builtin)
2524 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2525 name);
2526 else
2527 {
2528 char * p;
2529 char * nbuf;
2530
2531 hash_delete (arm_reg_hsh, name, FALSE);
2532 free ((char *) reg->name);
2533 if (reg->neon)
2534 free (reg->neon);
2535 free (reg);
2536
2537 /* Also locate the all upper case and all lower case versions.
2538 Do not complain if we cannot find one or the other as it
2539 was probably deleted above. */
2540
2541 nbuf = strdup (name);
2542 for (p = nbuf; *p; p++)
2543 *p = TOUPPER (*p);
2544 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2545 if (reg)
2546 {
2547 hash_delete (arm_reg_hsh, nbuf, FALSE);
2548 free ((char *) reg->name);
2549 if (reg->neon)
2550 free (reg->neon);
2551 free (reg);
2552 }
2553
2554 for (p = nbuf; *p; p++)
2555 *p = TOLOWER (*p);
2556 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2557 if (reg)
2558 {
2559 hash_delete (arm_reg_hsh, nbuf, FALSE);
2560 free ((char *) reg->name);
2561 if (reg->neon)
2562 free (reg->neon);
2563 free (reg);
2564 }
2565
2566 free (nbuf);
2567 }
2568 }
2569
2570 *input_line_pointer = saved_char;
2571 demand_empty_rest_of_line ();
2572 }
2573
2574 /* Directives: Instruction set selection. */
2575
2576 #ifdef OBJ_ELF
2577 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2578 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2579 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2580 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2581
2582 /* Create a new mapping symbol for the transition to STATE. */
2583
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for the new state: $d for data,
     $a for ARM code, $t for Thumb code.  All are untyped (BSF_NO_FLAGS).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb attributes on the symbol for code states.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same-address duplicate at the end of the frag: keep only the
	 newest symbol.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2657
2658 /* We must sometimes convert a region marked as code to data during
2659 code alignment, if an odd number of bytes have to be padded. The
2660 code mapping symbol is pushed to an aligned address. */
2661
2662 static void
2663 insert_data_mapping_symbol (enum mstate state,
2664 valueT value, fragS *frag, offsetT bytes)
2665 {
2666 /* If there was already a mapping symbol, remove it. */
2667 if (frag->tc_frag_data.last_map != NULL
2668 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2669 {
2670 symbolS *symp = frag->tc_frag_data.last_map;
2671
2672 if (value == 0)
2673 {
2674 know (frag->tc_frag_data.first_map == symp);
2675 frag->tc_frag_data.first_map = NULL;
2676 }
2677 frag->tc_frag_data.last_map = NULL;
2678 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2679 }
2680
2681 make_mapping_symbol (MAP_DATA, value, frag);
2682 make_mapping_symbol (state, value + bytes, frag);
2683 }
2684
2685 static void mapping_state_2 (enum mstate state, int max_chars);
2686
2687 /* Set the mapping state to STATE. Only call this when about to
2688 emit some STATE bytes to the file. */
2689
2690 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC-relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Delegate the actual symbol emission; 0 means the symbol goes at
     the current frag position.  */
  mapping_state_2 (state, 0);
}
2723
2724 /* Same as mapping_state, but MAX_CHARS bytes have already been
2725 allocated. Put the mapping symbol that far back. */
2726
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols in ordinary (loaded) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* First code emitted in this section: if anything (data) already
     precedes it, mark that prefix as data at offset 0 of the first
     frag.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Back up over the MAX_CHARS bytes the caller already allocated.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2753 #undef TRANSITION
2754 #else
2755 #define mapping_state(x) ((void)0)
2756 #define mapping_state_2(x, y) ((void)0)
2757 #endif
2758
2759 /* Find the real, Thumb encoded start of a Thumb function. */
2760
2761 #ifdef OBJ_COFF
2762 static symbolS *
2763 find_real_start (symbolS * symbolP)
2764 {
2765 char * real_start;
2766 const char * name = S_GET_NAME (symbolP);
2767 symbolS * new_target;
2768
2769 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2770 #define STUB_NAME ".real_start_of"
2771
2772 if (name == NULL)
2773 abort ();
2774
2775 /* The compiler may generate BL instructions to local labels because
2776 it needs to perform a branch to a far away location. These labels
2777 do not have a corresponding ".real_start_of" label. We check
2778 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2779 the ".real_start_of" convention for nonlocal branches. */
2780 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2781 return symbolP;
2782
2783 real_start = concat (STUB_NAME, name, NULL);
2784 new_target = symbol_find (real_start);
2785 free (real_start);
2786
2787 if (new_target == NULL)
2788 {
2789 as_warn (_("Failed to find real start of function: %s\n"), name);
2790 new_target = symbolP;
2791 }
2792
2793 return new_target;
2794 }
2795 #endif
2796
2797 static void
2798 opcode_select (int width)
2799 {
2800 switch (width)
2801 {
2802 case 16:
2803 if (! thumb_mode)
2804 {
2805 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2806 as_bad (_("selected processor does not support THUMB opcodes"));
2807
2808 thumb_mode = 1;
2809 /* No need to force the alignment, since we will have been
2810 coming from ARM mode, which is word-aligned. */
2811 record_alignment (now_seg, 1);
2812 }
2813 break;
2814
2815 case 32:
2816 if (thumb_mode)
2817 {
2818 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2819 as_bad (_("selected processor does not support ARM opcodes"));
2820
2821 thumb_mode = 0;
2822
2823 if (!need_pass_2)
2824 frag_align (2, 0, 0);
2825
2826 record_alignment (now_seg, 1);
2827 }
2828 break;
2829
2830 default:
2831 as_bad (_("invalid instruction size selected (%d)"), width);
2832 }
2833 }
2834
2835 static void
2836 s_arm (int ignore ATTRIBUTE_UNUSED)
2837 {
2838 opcode_select (32);
2839 demand_empty_rest_of_line ();
2840 }
2841
2842 static void
2843 s_thumb (int ignore ATTRIBUTE_UNUSED)
2844 {
2845 opcode_select (16);
2846 demand_empty_rest_of_line ();
2847 }
2848
2849 static void
2850 s_code (int unused ATTRIBUTE_UNUSED)
2851 {
2852 int temp;
2853
2854 temp = get_absolute_expression ();
2855 switch (temp)
2856 {
2857 case 16:
2858 case 32:
2859 opcode_select (temp);
2860 break;
2861
2862 default:
2863 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2864 }
2865 }
2866
2867 static void
2868 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2869 {
2870 /* If we are not already in thumb mode go into it, EVEN if
2871 the target processor does not support thumb instructions.
2872 This is used by gcc/config/arm/lib1funcs.asm for example
2873 to compile interworking support functions even if the
2874 target processor should not support interworking. */
2875 if (! thumb_mode)
2876 {
2877 thumb_mode = 2;
2878 record_alignment (now_seg, 1);
2879 }
2880
2881 demand_empty_rest_of_line ();
2882 }
2883
2884 static void
2885 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2886 {
2887 s_thumb (0);
2888
2889 /* The following label is the name/address of the start of a Thumb function.
2890 We need to know this for the interworking support. */
2891 label_is_thumb_function_name = TRUE;
2892 }
2893
2894 /* Perform a .set directive, but also mark the alias as
2895 being a thumb function. */
2896
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  /* The symbol name must be followed by a comma and a value.  */
  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Look up the symbol; create it if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* For .thumb_set (EQUIV non-zero), redefining an already-defined
     symbol is an error, except for register-section symbols.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2982
2983 /* Directives: Mode selection. */
2984
2985 /* .syntax [unified|divided] - choose the new unified syntax
2986 (same for Arm and Thumb encoding, modulo slight differences in what
2987 can be represented) or the old divergent syntax for each mode. */
2988 static void
2989 s_syntax (int unused ATTRIBUTE_UNUSED)
2990 {
2991 char *name, delim;
2992
2993 delim = get_symbol_name (& name);
2994
2995 if (!strcasecmp (name, "unified"))
2996 unified_syntax = TRUE;
2997 else if (!strcasecmp (name, "divided"))
2998 unified_syntax = FALSE;
2999 else
3000 {
3001 as_bad (_("unrecognized syntax mode \"%s\""), name);
3002 return;
3003 }
3004 (void) restore_line_pointer (delim);
3005 demand_empty_rest_of_line ();
3006 }
3007
3008 /* Directives: sectioning and alignment. */
3009
3010 static void
3011 s_bss (int ignore ATTRIBUTE_UNUSED)
3012 {
3013 /* We don't support putting frags in the BSS segment, we fake it by
3014 marking in_bss, then looking at s_skip for clues. */
3015 subseg_set (bss_section, 0);
3016 demand_empty_rest_of_line ();
3017
3018 #ifdef md_elf_section_change_hook
3019 md_elf_section_change_hook ();
3020 #endif
3021 }
3022
3023 static void
3024 s_even (int ignore ATTRIBUTE_UNUSED)
3025 {
3026 /* Never make frag if expect extra pass. */
3027 if (!need_pass_2)
3028 frag_align (1, 0, 0);
3029
3030 record_alignment (now_seg, 1);
3031
3032 demand_empty_rest_of_line ();
3033 }
3034
3035 /* Directives: CodeComposer Studio. */
3036
3037 /* .ref (for CodeComposer Studio syntax only). */
3038 static void
3039 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3040 {
3041 if (codecomposer_syntax)
3042 ignore_rest_of_line ();
3043 else
3044 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3045 }
3046
/* If NAME is not NULL, it marks the beginning of a function, whereas
   a NULL NAME marks the function's end.  */
3049 static void
3050 asmfunc_debug (const char * name)
3051 {
3052 static const char * last_name = NULL;
3053
3054 if (name != NULL)
3055 {
3056 gas_assert (last_name == NULL);
3057 last_name = name;
3058
3059 if (debug_type == DEBUG_STABS)
3060 stabs_generate_asm_func (name, name);
3061 }
3062 else
3063 {
3064 gas_assert (last_name != NULL);
3065
3066 if (debug_type == DEBUG_STABS)
3067 stabs_generate_asm_endfunc (last_name, last_name);
3068
3069 last_name = NULL;
3070 }
3071 }
3072
3073 static void
3074 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3075 {
3076 if (codecomposer_syntax)
3077 {
3078 switch (asmfunc_state)
3079 {
3080 case OUTSIDE_ASMFUNC:
3081 asmfunc_state = WAITING_ASMFUNC_NAME;
3082 break;
3083
3084 case WAITING_ASMFUNC_NAME:
3085 as_bad (_(".asmfunc repeated."));
3086 break;
3087
3088 case WAITING_ENDASMFUNC:
3089 as_bad (_(".asmfunc without function."));
3090 break;
3091 }
3092 demand_empty_rest_of_line ();
3093 }
3094 else
3095 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3096 }
3097
3098 static void
3099 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3100 {
3101 if (codecomposer_syntax)
3102 {
3103 switch (asmfunc_state)
3104 {
3105 case OUTSIDE_ASMFUNC:
3106 as_bad (_(".endasmfunc without a .asmfunc."));
3107 break;
3108
3109 case WAITING_ASMFUNC_NAME:
3110 as_bad (_(".endasmfunc without function."));
3111 break;
3112
3113 case WAITING_ENDASMFUNC:
3114 asmfunc_state = OUTSIDE_ASMFUNC;
3115 asmfunc_debug (NULL);
3116 break;
3117 }
3118 demand_empty_rest_of_line ();
3119 }
3120 else
3121 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3122 }
3123
3124 static void
3125 s_ccs_def (int name)
3126 {
3127 if (codecomposer_syntax)
3128 s_globl (name);
3129 else
3130 as_bad (_(".def pseudo-op only available with -mccs flag."));
3131 }
3132
3133 /* Directives: Literal pools. */
3134
3135 static literal_pool *
3136 find_literal_pool (void)
3137 {
3138 literal_pool * pool;
3139
3140 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3141 {
3142 if (pool->section == now_seg
3143 && pool->sub_section == now_subseg)
3144 break;
3145 }
3146
3147 return pool;
3148 }
3149
3150 static literal_pool *
3151 find_or_make_literal_pool (void)
3152 {
3153 /* Next literal pool ID number. */
3154 static unsigned int latest_pool_num = 1;
3155 literal_pool * pool;
3156
3157 pool = find_literal_pool ();
3158
3159 if (pool == NULL)
3160 {
3161 /* Create a new pool. */
3162 pool = XNEW (literal_pool);
3163 if (! pool)
3164 return NULL;
3165
3166 pool->next_free_entry = 0;
3167 pool->section = now_seg;
3168 pool->sub_section = now_subseg;
3169 pool->next = list_of_pools;
3170 pool->symbol = NULL;
3171 pool->alignment = 2;
3172
3173 /* Add it to the list. */
3174 list_of_pools = pool;
3175 }
3176
3177 /* New pools, and emptied pools, will have a NULL symbol. */
3178 if (pool->symbol == NULL)
3179 {
3180 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3181 (valueT) 0, &zero_address_frag);
3182 pool->id = latest_pool_num ++;
3183 }
3184
3185 /* Done. */
3186 return pool;
3187 }
3188
3189 /* Add the literal in the global 'inst'
3190 structure to the relevant literal pool. */
3191
/* Add the literal held in the global 'inst' structure to the literal
   pool of the current (sub)section.  NBYTES is 4 for a word literal or
   8 for a double-word literal (which is stored as two 4-byte entries).
   On success, rewrites inst.reloc.exp to be pool-symbol-relative and
   returns SUCCESS; returns FAIL (with inst.error set) on overflow or a
   non-constant 8-byte literal.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, honouring the
	 target endianness (imm1 is emitted first).  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry ...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 at an 8-byte-aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot (inserted for 8-byte alignment) can be reused
	 for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte padding slot to reach 8-byte alignment.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Emit the two 4-byte halves.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the reusable padding slot with the new literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Make the instruction's relocation refer to the pool symbol plus
     the literal's byte offset within the pool.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3353
3354 bfd_boolean
3355 tc_start_label_without_colon (void)
3356 {
3357 bfd_boolean ret = TRUE;
3358
3359 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3360 {
3361 const char *label = input_line_pointer;
3362
3363 while (!is_end_of_line[(int) label[-1]])
3364 --label;
3365
3366 if (*label == '.')
3367 {
3368 as_bad (_("Invalid label '%s'"), label);
3369 ret = FALSE;
3370 }
3371
3372 asmfunc_debug (label);
3373
3374 asmfunc_state = WAITING_ENDASMFUNC;
3375 }
3376
3377 return ret;
3378 }
3379
3380 /* Can't use symbol_new here, so have to create a symbol and then at
3381 a later date assign it a value. Thats what these functions do. */
3382
/* Give SYMBOLP, created earlier without a location, its final identity:
   copy NAME into the notes obstack, set segment, value and owning
   fragment, and append the symbol to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Take a private copy so the caller's buffer may be reused.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table has been frozen would corrupt the
       output, so fail hard.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3430
/* Implement the .ltorg/.pool directives: emit the pending literal pool
   at the current location, give it its deferred label, then mark the
   pool empty so a new one can be started.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool, or it is empty.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  /* NOTE(review): alignment of 2 (4 bytes) is recorded even when the
     pool itself was aligned to pool->alignment (which may be 3) —
     confirm this is intentional.  */
  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data follows code here, so emit a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated name out of the user namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* The pool's symbol was created when the first literal was added;
     only now do we know where it lives.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      /* Attribute each literal to the source line that first used it.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The low bits of X_md hold the entry size in bytes.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3482
3483 #ifdef OBJ_ELF
3484 /* Forward declarations for functions below, in the MD interface
3485 section. */
3486 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3487 static valueT create_unwind_entry (int);
3488 static void start_unwind_section (const segT, int);
3489 static void add_unwind_opcode (valueT, int);
3490 static void flush_pending_unwind (void);
3491
3492 /* Directives: Data. */
3493
/* Implement .word/.long (NBYTES = 4): like the generic cons, but each
   comma-separated expression may carry an ARM relocation suffix such
   as "(got)" after a symbol.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol may be followed by a parenthesised reloc suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the value as plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the whole operand, then slide the pre-suffix
		     expression text forward over the suffix so the
		     expression and any trailing text become contiguous
		     and can be re-parsed in one piece.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original input line contents.  */
		  memcpy (base, save_buf, p - base);

		  /* Place a SIZE-byte fixup in the last SIZE bytes of
		     the NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3586
3587 /* Emit an expression containing a 32-bit thumb instruction.
3588 Implementation based on put_thumb32_insn. */
3589
3590 static void
3591 emit_thumb32_expr (expressionS * exp)
3592 {
3593 expressionS exp_high = *exp;
3594
3595 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3596 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3597 exp->X_add_number &= 0xffff;
3598 emit_expr (exp, (unsigned int) THUMB_SIZE);
3599 }
3600
3601 /* Guess the instruction size based on the opcode. */
3602
/* Guess the size in bytes of a Thumb instruction from its encoding:
   2 for a 16-bit encoding, 4 for a 32-bit one, 0 when the value is
   ambiguous and a width suffix is required.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int enc = (unsigned int) opcode;

  /* A leading halfword below 0xe800 is always a 16-bit encoding;
     a value of at least 0xe8000000 is always a full 32-bit one.  */
  if (enc < 0xe800u)
    return 2;
  if (enc >= 0xe8000000u)
    return 4;
  return 0;
}
3613
3614 static bfd_boolean
3615 emit_insn (expressionS *exp, int nbytes)
3616 {
3617 int size = 0;
3618
3619 if (exp->X_op == O_constant)
3620 {
3621 size = nbytes;
3622
3623 if (size == 0)
3624 size = thumb_insn_size (exp->X_add_number);
3625
3626 if (size != 0)
3627 {
3628 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3629 {
3630 as_bad (_(".inst.n operand too big. "\
3631 "Use .inst.w instead"));
3632 size = 0;
3633 }
3634 else
3635 {
3636 if (now_it.state == AUTOMATIC_IT_BLOCK)
3637 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3638 else
3639 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3640
3641 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3642 emit_thumb32_expr (exp);
3643 else
3644 emit_expr (exp, (unsigned int) size);
3645
3646 it_fsm_post_encode ();
3647 }
3648 }
3649 else
3650 as_bad (_("cannot determine Thumb instruction size. " \
3651 "Use .inst.n/.inst.w instead"));
3652 }
3653 else
3654 as_bad (_("constant expression required"));
3655
3656 return (size != 0);
3657 }
3658
3659 /* Like s_arm_elf_cons but do not use md_cons_align and
3660 set the mapping state to MAP_ARM/MAP_THUMB. */
3661
3662 static void
3663 s_arm_elf_inst (int nbytes)
3664 {
3665 if (is_it_end_of_statement ())
3666 {
3667 demand_empty_rest_of_line ();
3668 return;
3669 }
3670
3671 /* Calling mapping_state () here will not change ARM/THUMB,
3672 but will ensure not to be in DATA state. */
3673
3674 if (thumb_mode)
3675 mapping_state (MAP_THUMB);
3676 else
3677 {
3678 if (nbytes != 0)
3679 {
3680 as_bad (_("width suffixes are invalid in ARM mode"));
3681 ignore_rest_of_line ();
3682 return;
3683 }
3684
3685 nbytes = 4;
3686
3687 mapping_state (MAP_ARM);
3688 }
3689
3690 do
3691 {
3692 expressionS exp;
3693
3694 expression (& exp);
3695
3696 if (! emit_insn (& exp, nbytes))
3697 {
3698 ignore_rest_of_line ();
3699 return;
3700 }
3701 }
3702 while (*input_line_pointer++ == ',');
3703
3704 /* Put terminator back into stream. */
3705 input_line_pointer --;
3706 demand_empty_rest_of_line ();
3707 }
3708
3709 /* Parse a .rel31 directive. */
3710
3711 static void
3712 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3713 {
3714 expressionS exp;
3715 char *p;
3716 valueT highbit;
3717
3718 highbit = 0;
3719 if (*input_line_pointer == '1')
3720 highbit = 0x80000000;
3721 else if (*input_line_pointer != '0')
3722 as_bad (_("expected 0 or 1"));
3723
3724 input_line_pointer++;
3725 if (*input_line_pointer != ',')
3726 as_bad (_("missing comma"));
3727 input_line_pointer++;
3728
3729 #ifdef md_flush_pending_output
3730 md_flush_pending_output ();
3731 #endif
3732
3733 #ifdef md_cons_align
3734 md_cons_align (4);
3735 #endif
3736
3737 mapping_state (MAP_DATA);
3738
3739 expression (&exp);
3740
3741 p = frag_more (4);
3742 md_number_to_chars (p, highbit, 4);
3743 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3744 BFD_RELOC_ARM_PREL31);
3745
3746 demand_empty_rest_of_line ();
3747 }
3748
3749 /* Directives: AEABI stack-unwind tables. */
3750
3751 /* Parse an unwind_fnstart directive. Simply records the current location. */
3752
3753 static void
3754 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3755 {
3756 demand_empty_rest_of_line ();
3757 if (unwind.proc_start)
3758 {
3759 as_bad (_("duplicate .fnstart directive"));
3760 return;
3761 }
3762
3763 /* Mark the start of the function. */
3764 unwind.proc_start = expr_build_dot ();
3765
3766 /* Reset the rest of the unwind info. */
3767 unwind.opcode_count = 0;
3768 unwind.table_entry = NULL;
3769 unwind.personality_routine = NULL;
3770 unwind.personality_index = -1;
3771 unwind.frame_size = 0;
3772 unwind.fp_offset = 0;
3773 unwind.fp_reg = REG_SP;
3774 unwind.fp_used = 0;
3775 unwind.sp_restored = 0;
3776 }
3777
3778
3779 /* Parse a handlerdata directive. Creates the exception handling table entry
3780 for the function. */
3781
3782 static void
3783 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3784 {
3785 demand_empty_rest_of_line ();
3786 if (!unwind.proc_start)
3787 as_bad (MISSING_FNSTART);
3788
3789 if (unwind.table_entry)
3790 as_bad (_("duplicate .handlerdata directive"));
3791
3792 create_unwind_entry (1);
3793 }
3794
3795 /* Parse an unwind_fnend directive. Generates the index table entry. */
3796
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is nonzero only when the entry can be
     expressed inline in the index table (no .handlerdata given).  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the start of the two-word entry just
     allocated.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix records the reference without patching bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      /* Remember we have emitted this dependency for the section.  */
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3864
3865
3866 /* Parse an unwind_cantunwind directive. */
3867
3868 static void
3869 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3870 {
3871 demand_empty_rest_of_line ();
3872 if (!unwind.proc_start)
3873 as_bad (MISSING_FNSTART);
3874
3875 if (unwind.personality_routine || unwind.personality_index != -1)
3876 as_bad (_("personality routine specified for cantunwind frame"));
3877
3878 unwind.personality_index = -2;
3879 }
3880
3881
3882 /* Parse a personalityindex directive. */
3883
3884 static void
3885 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3886 {
3887 expressionS exp;
3888
3889 if (!unwind.proc_start)
3890 as_bad (MISSING_FNSTART);
3891
3892 if (unwind.personality_routine || unwind.personality_index != -1)
3893 as_bad (_("duplicate .personalityindex directive"));
3894
3895 expression (&exp);
3896
3897 if (exp.X_op != O_constant
3898 || exp.X_add_number < 0 || exp.X_add_number > 15)
3899 {
3900 as_bad (_("bad personality routine number"));
3901 ignore_rest_of_line ();
3902 return;
3903 }
3904
3905 unwind.personality_index = exp.X_add_number;
3906
3907 demand_empty_rest_of_line ();
3908 }
3909
3910
3911 /* Parse a personality directive. */
3912
3913 static void
3914 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3915 {
3916 char *name, *p, c;
3917
3918 if (!unwind.proc_start)
3919 as_bad (MISSING_FNSTART);
3920
3921 if (unwind.personality_routine || unwind.personality_index != -1)
3922 as_bad (_("duplicate .personality directive"));
3923
3924 c = get_symbol_name (& name);
3925 p = input_line_pointer;
3926 if (c == '"')
3927 ++ input_line_pointer;
3928 unwind.personality_routine = symbol_find_or_make (name);
3929 *p = c;
3930 demand_empty_rest_of_line ();
3931 }
3932
3933
3934 /* Parse a directive saving core registers. */
3935
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;	/* Bitmask of saved core registers, bit N = rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode the preceding .unwind_movsp emitted, and treat
	 the save of ip (r12) as a save of sp (r13).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  Each core register is 4.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4009
4010
4011 /* Parse a directive saving FPA registers. */
4012
4013 static void
4014 s_arm_unwind_save_fpa (int reg)
4015 {
4016 expressionS exp;
4017 int num_regs;
4018 valueT op;
4019
4020 /* Get Number of registers to transfer. */
4021 if (skip_past_comma (&input_line_pointer) != FAIL)
4022 expression (&exp);
4023 else
4024 exp.X_op = O_illegal;
4025
4026 if (exp.X_op != O_constant)
4027 {
4028 as_bad (_("expected , <constant>"));
4029 ignore_rest_of_line ();
4030 return;
4031 }
4032
4033 num_regs = exp.X_add_number;
4034
4035 if (num_regs < 1 || num_regs > 4)
4036 {
4037 as_bad (_("number of registers must be in the range [1:4]"));
4038 ignore_rest_of_line ();
4039 return;
4040 }
4041
4042 demand_empty_rest_of_line ();
4043
4044 if (reg == 4)
4045 {
4046 /* Short form. */
4047 op = 0xb4 | (num_regs - 1);
4048 add_unwind_opcode (op, 1);
4049 }
4050 else
4051 {
4052 /* Long form. */
4053 op = 0xc800 | (reg << 4) | (num_regs - 1);
4054 add_unwind_opcode (op, 2);
4055 }
4056 unwind.frame_size += num_regs * 12;
4057 }
4058
4059
4060 /* Parse a directive saving VFP registers for ARMv6 and above. */
4061
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* d16 is encoded as offset 0 in the 0xc800 opcode.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  /* NOTE(review): when START > 16 this computes a negative
     NUM_REGS_BELOW_16 and the assertion below appears to fail —
     verify behaviour for lists that begin above d16.  */
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4108
4109
4110 /* Parse a directive saving VFP registers for pre-ARMv6. */
4111
4112 static void
4113 s_arm_unwind_save_vfp (void)
4114 {
4115 int count;
4116 unsigned int reg;
4117 valueT op;
4118
4119 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4120 if (count == FAIL)
4121 {
4122 as_bad (_("expected register list"));
4123 ignore_rest_of_line ();
4124 return;
4125 }
4126
4127 demand_empty_rest_of_line ();
4128
4129 if (reg == 8)
4130 {
4131 /* Short form. */
4132 op = 0xb8 | (count - 1);
4133 add_unwind_opcode (op, 1);
4134 }
4135 else
4136 {
4137 /* Long form. */
4138 op = 0xb300 | (reg << 4) | (count - 1);
4139 add_unwind_opcode (op, 2);
4140 }
4141 unwind.frame_size += count * 8 + 4;
4142 }
4143
4144
4145 /* Parse a directive saving iWMMXt data registers. */
4146
static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set = wrN saved.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse the {wrN, wrM-wrK, ...} register list into MASK.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes on the stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was the short form 0xc0..0xc5, saving
		 wr10..wr(10+i); merge if this list is exactly wr9.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was the long form 0xc6; the byte before
		 it holds the start register (high nibble) and the
		 count - 1 (low nibble).  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      /* Merge only when this list ends exactly where the
		 previous block begins.  */
	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form: block is wr10..HI_REG.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4278
4279 static void
4280 s_arm_unwind_save_mmxwcg (void)
4281 {
4282 int reg;
4283 int hi_reg;
4284 unsigned mask = 0;
4285 valueT op;
4286
4287 if (*input_line_pointer == '{')
4288 input_line_pointer++;
4289
4290 skip_whitespace (input_line_pointer);
4291
4292 do
4293 {
4294 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4295
4296 if (reg == FAIL)
4297 {
4298 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4299 goto error;
4300 }
4301
4302 reg -= 8;
4303 if (mask >> reg)
4304 as_tsktsk (_("register list not in ascending order"));
4305 mask |= 1 << reg;
4306
4307 if (*input_line_pointer == '-')
4308 {
4309 input_line_pointer++;
4310 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4311 if (hi_reg == FAIL)
4312 {
4313 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4314 goto error;
4315 }
4316 else if (reg >= hi_reg)
4317 {
4318 as_bad (_("bad register range"));
4319 goto error;
4320 }
4321 for (; reg < hi_reg; reg++)
4322 mask |= 1 << reg;
4323 }
4324 }
4325 while (skip_past_comma (&input_line_pointer) != FAIL);
4326
4327 skip_past_char (&input_line_pointer, '}');
4328
4329 demand_empty_rest_of_line ();
4330
4331 /* Generate any deferred opcodes because we're going to be looking at
4332 the list. */
4333 flush_pending_unwind ();
4334
4335 for (reg = 0; reg < 16; reg++)
4336 {
4337 if (mask & (1 << reg))
4338 unwind.frame_size += 4;
4339 }
4340 op = 0xc700 | mask;
4341 add_unwind_opcode (op, 2);
4342 return;
4343 error:
4344 ignore_rest_of_line ();
4345 }
4346
4347
4348 /* Parse an unwind_save directive.
4349 If the argument is non-zero, this is a .vsave directive. */
4350
4351 static void
4352 s_arm_unwind_save (int arch_v6)
4353 {
4354 char *peek;
4355 struct reg_entry *reg;
4356 bfd_boolean had_brace = FALSE;
4357
4358 if (!unwind.proc_start)
4359 as_bad (MISSING_FNSTART);
4360
4361 /* Figure out what sort of save we have. */
4362 peek = input_line_pointer;
4363
4364 if (*peek == '{')
4365 {
4366 had_brace = TRUE;
4367 peek++;
4368 }
4369
4370 reg = arm_reg_parse_multi (&peek);
4371
4372 if (!reg)
4373 {
4374 as_bad (_("register expected"));
4375 ignore_rest_of_line ();
4376 return;
4377 }
4378
4379 switch (reg->type)
4380 {
4381 case REG_TYPE_FN:
4382 if (had_brace)
4383 {
4384 as_bad (_("FPA .unwind_save does not take a register list"));
4385 ignore_rest_of_line ();
4386 return;
4387 }
4388 input_line_pointer = peek;
4389 s_arm_unwind_save_fpa (reg->number);
4390 return;
4391
4392 case REG_TYPE_RN:
4393 s_arm_unwind_save_core ();
4394 return;
4395
4396 case REG_TYPE_VFD:
4397 if (arch_v6)
4398 s_arm_unwind_save_vfp_armv6 ();
4399 else
4400 s_arm_unwind_save_vfp ();
4401 return;
4402
4403 case REG_TYPE_MMXWR:
4404 s_arm_unwind_save_mmxwr ();
4405 return;
4406
4407 case REG_TYPE_MMXWCG:
4408 s_arm_unwind_save_mmxwcg ();
4409 return;
4410
4411 default:
4412 as_bad (_(".unwind_save does not support this kind of register"));
4413 ignore_rest_of_line ();
4414 }
4415 }
4416
4417
4418 /* Parse an unwind_movsp directive. */
4419
4420 static void
4421 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4422 {
4423 int reg;
4424 valueT op;
4425 int offset;
4426
4427 if (!unwind.proc_start)
4428 as_bad (MISSING_FNSTART);
4429
4430 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4431 if (reg == FAIL)
4432 {
4433 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4434 ignore_rest_of_line ();
4435 return;
4436 }
4437
4438 /* Optional constant. */
4439 if (skip_past_comma (&input_line_pointer) != FAIL)
4440 {
4441 if (immediate_for_directive (&offset) == FAIL)
4442 return;
4443 }
4444 else
4445 offset = 0;
4446
4447 demand_empty_rest_of_line ();
4448
4449 if (reg == REG_SP || reg == REG_PC)
4450 {
4451 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4452 return;
4453 }
4454
4455 if (unwind.fp_reg != REG_SP)
4456 as_bad (_("unexpected .unwind_movsp directive"));
4457
4458 /* Generate opcode to restore the value. */
4459 op = 0x90 | reg;
4460 add_unwind_opcode (op, 1);
4461
4462 /* Record the information for later. */
4463 unwind.fp_reg = reg;
4464 unwind.fp_offset = unwind.frame_size - offset;
4465 unwind.sp_restored = 1;
4466 }
4467
4468 /* Parse an unwind_pad directive. */
4469
4470 static void
4471 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4472 {
4473 int offset;
4474
4475 if (!unwind.proc_start)
4476 as_bad (MISSING_FNSTART);
4477
4478 if (immediate_for_directive (&offset) == FAIL)
4479 return;
4480
4481 if (offset & 3)
4482 {
4483 as_bad (_("stack increment must be multiple of 4"));
4484 ignore_rest_of_line ();
4485 return;
4486 }
4487
4488 /* Don't generate any opcodes, just record the details for later. */
4489 unwind.frame_size += offset;
4490 unwind.pending_offset += offset;
4491
4492 demand_empty_rest_of_line ();
4493 }
4494
4495 /* Parse an unwind_setfp directive. */
4496
4497 static void
4498 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4499 {
4500 int sp_reg;
4501 int fp_reg;
4502 int offset;
4503
4504 if (!unwind.proc_start)
4505 as_bad (MISSING_FNSTART);
4506
4507 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4508 if (skip_past_comma (&input_line_pointer) == FAIL)
4509 sp_reg = FAIL;
4510 else
4511 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4512
4513 if (fp_reg == FAIL || sp_reg == FAIL)
4514 {
4515 as_bad (_("expected <reg>, <reg>"));
4516 ignore_rest_of_line ();
4517 return;
4518 }
4519
4520 /* Optional constant. */
4521 if (skip_past_comma (&input_line_pointer) != FAIL)
4522 {
4523 if (immediate_for_directive (&offset) == FAIL)
4524 return;
4525 }
4526 else
4527 offset = 0;
4528
4529 demand_empty_rest_of_line ();
4530
4531 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4532 {
4533 as_bad (_("register must be either sp or set by a previous"
4534 "unwind_movsp directive"));
4535 return;
4536 }
4537
4538 /* Don't generate any opcodes, just record the information for later. */
4539 unwind.fp_reg = fp_reg;
4540 unwind.fp_used = 1;
4541 if (sp_reg == REG_SP)
4542 unwind.fp_offset = unwind.frame_size - offset;
4543 else
4544 unwind.fp_offset -= offset;
4545 }
4546
4547 /* Parse an unwind_raw directive. */
4548
4549 static void
4550 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4551 {
4552 expressionS exp;
4553 /* This is an arbitrary limit. */
4554 unsigned char op[16];
4555 int count;
4556
4557 if (!unwind.proc_start)
4558 as_bad (MISSING_FNSTART);
4559
4560 expression (&exp);
4561 if (exp.X_op == O_constant
4562 && skip_past_comma (&input_line_pointer) != FAIL)
4563 {
4564 unwind.frame_size += exp.X_add_number;
4565 expression (&exp);
4566 }
4567 else
4568 exp.X_op = O_illegal;
4569
4570 if (exp.X_op != O_constant)
4571 {
4572 as_bad (_("expected <offset>, <opcode>"));
4573 ignore_rest_of_line ();
4574 return;
4575 }
4576
4577 count = 0;
4578
4579 /* Parse the opcode. */
4580 for (;;)
4581 {
4582 if (count >= 16)
4583 {
4584 as_bad (_("unwind opcode too long"));
4585 ignore_rest_of_line ();
4586 }
4587 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4588 {
4589 as_bad (_("invalid unwind opcode"));
4590 ignore_rest_of_line ();
4591 return;
4592 }
4593 op[count++] = exp.X_add_number;
4594
4595 /* Parse the next byte. */
4596 if (skip_past_comma (&input_line_pointer) == FAIL)
4597 break;
4598
4599 expression (&exp);
4600 }
4601
4602 /* Add the opcode bytes in reverse order. */
4603 while (count--)
4604 add_unwind_opcode (op[count], 1);
4605
4606 demand_empty_rest_of_line ();
4607 }
4608
4609
4610 /* Parse a .eabi_attribute directive. */
4611
4612 static void
4613 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4614 {
4615 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4616
4617 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4618 attributes_set_explicitly[tag] = 1;
4619 }
4620
4621 /* Emit a tls fix for the symbol. */
4622
4623 static void
4624 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4625 {
4626 char *p;
4627 expressionS exp;
4628 #ifdef md_flush_pending_output
4629 md_flush_pending_output ();
4630 #endif
4631
4632 #ifdef md_cons_align
4633 md_cons_align (4);
4634 #endif
4635
4636 /* Since we're just labelling the code, there's no need to define a
4637 mapping symbol. */
4638 expression (&exp);
4639 p = obstack_next_free (&frchain_now->frch_obstack);
4640 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4641 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4642 : BFD_RELOC_ARM_TLS_DESCSEQ);
4643 }
4644 #endif /* OBJ_ELF */
4645
4646 static void s_arm_arch (int);
4647 static void s_arm_object_arch (int);
4648 static void s_arm_cpu (int);
4649 static void s_arm_fpu (int);
4650 static void s_arm_arch_extension (int);
4651
4652 #ifdef TE_PE
4653
4654 static void
4655 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4656 {
4657 expressionS exp;
4658
4659 do
4660 {
4661 expression (&exp);
4662 if (exp.X_op == O_symbol)
4663 exp.X_op = O_secrel;
4664
4665 emit_expr (&exp, 4);
4666 }
4667 while (*input_line_pointer++ == ',');
4668
4669 input_line_pointer--;
4670 demand_empty_rest_of_line ();
4671 }
4672 #endif /* TE_PE */
4673
4674 /* This table describes all the machine specific pseudo-ops the assembler
4675 has to support. The fields are:
4676 pseudo-op name without dot
4677 function to call to execute this pseudo-op
4678 Integer arg to pass to the function. */
4679
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align_ptwo,  2 },
  /* Instruction-set state switching.  */
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  /* Literal pool management ('.pool' is a synonym of '.ltorg').  */
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  /* Target selection.  */
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",        s_arm_elf_cons, 4 },
  { "long",        s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",       s_arm_rel31,    0 },
  /* EABI unwind-table directives.  */
  { "fnstart",         s_arm_unwind_fnstart,   0 },
  { "fnend",           s_arm_unwind_fnend,     0 },
  { "cantunwind",      s_arm_unwind_cantunwind, 0 },
  { "personality",     s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",     s_arm_unwind_handlerdata, 0 },
  { "save",            s_arm_unwind_save,     0 },
  { "vsave",           s_arm_unwind_save,     1 },
  { "movsp",           s_arm_unwind_movsp,    0 },
  { "pad",             s_arm_unwind_pad,      0 },
  { "setfp",           s_arm_unwind_setfp,    0 },
  { "unwind_raw",      s_arm_unwind_raw,      0 },
  { "eabi_attribute",  s_arm_eabi_attribute,  0 },
  { "tlsdescseq",      s_arm_tls_descseq,     0 },
#else
  { "word",        cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",      float_cons, 'x' },
  { "ldouble",     float_cons, 'x' },
  { "packed",      float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4753 \f
4754 /* Parser functions used exclusively in instruction operands. */
4755
4756 /* Generic immediate-value read function for use in insn parsing.
4757 STR points to the beginning of the immediate (the leading #);
4758 VAL receives the value; if the value is outside [MIN, MAX]
4759 issue an error. PREFIX_OPT is true if the immediate prefix is
4760 optional. */
4761
4762 static int
4763 parse_immediate (char **str, int *val, int min, int max,
4764 bfd_boolean prefix_opt)
4765 {
4766 expressionS exp;
4767 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4768 if (exp.X_op != O_constant)
4769 {
4770 inst.error = _("constant expression required");
4771 return FAIL;
4772 }
4773
4774 if (exp.X_add_number < min || exp.X_add_number > max)
4775 {
4776 inst.error = _("immediate value out of range");
4777 return FAIL;
4778 }
4779
4780 *val = exp.X_add_number;
4781 return SUCCESS;
4782 }
4783
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits go to .imm; if 64 bits are present, the high 32 bits go to .reg and
   .regisimm is set.  IN_EXP, when non-NULL, also receives the parsed
   expression.  ALLOW_SYMBOL_P permits an O_symbol expression to succeed
   without filling in an immediate.  Returns SUCCESS or FAIL; *STR is only
   advanced on success.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.
	     Split the shift into two 16-bit steps to stay portable.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* PARTS = littlenums per 32-bit word.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4856
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The returned value
   is the index into fp_values biased by 8 (NOTE(review): presumably
   mapping onto the FPA immediate pseudo-registers — confirm against
   callers).  On failure inst.error is set.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Prefix matched but junk followed; undo the advance.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  The
     generic expression code works on input_line_pointer, so save and
     restore it around the call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4949
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. bits 0-18 clear,
   bits 29-25 all equal, and bit 30 their complement.  */

static int
is_quarter_float (unsigned imm)
{
  /* Expected contents of bits 30-25: either 0b011111 or 0b100000,
     selected by bit 29.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
4959
4960
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and advances *IN past the constant
   when one is found.  Note that on a FALSE return *IN may already have
   been advanced past the immediate prefix.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0] — only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* NOTE(review): low > leader appears to be how atof_generic encodes
     a value with no significant digits, i.e. zero — this relies on
     atof_generic internals; confirm before changing.  Only '+' signed
     zeros are accepted here.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4994
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.
   On SUCCESS, *IMMED receives the raw 32-bit single-precision bit pattern
   and *CCP is advanced past the constant; on FAIL *CCP is unchanged.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a '.' or exponent marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  Assemble the
	 littlenums most-significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a representable quarter-float, or +0.0/-0.0 (sign bit
	 ignored by the mask).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5058
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Association between a textual shift name and its kind; looked up
   via the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5080
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5083
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5086 RRX
5087
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5090
5091 static int
5092 parse_shift (char **str, int i, enum parse_shift_mode mode)
5093 {
5094 const struct asm_shift_name *shift_name;
5095 enum shift_kind shift;
5096 char *s = *str;
5097 char *p = s;
5098 int reg;
5099
5100 for (p = *str; ISALPHA (*p); p++)
5101 ;
5102
5103 if (p == *str)
5104 {
5105 inst.error = _("shift expression expected");
5106 return FAIL;
5107 }
5108
5109 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5110 p - *str);
5111
5112 if (shift_name == NULL)
5113 {
5114 inst.error = _("shift expression expected");
5115 return FAIL;
5116 }
5117
5118 shift = shift_name->kind;
5119
5120 switch (mode)
5121 {
5122 case NO_SHIFT_RESTRICT:
5123 case SHIFT_IMMEDIATE: break;
5124
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5126 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5127 {
5128 inst.error = _("'LSL' or 'ASR' required");
5129 return FAIL;
5130 }
5131 break;
5132
5133 case SHIFT_LSL_IMMEDIATE:
5134 if (shift != SHIFT_LSL)
5135 {
5136 inst.error = _("'LSL' required");
5137 return FAIL;
5138 }
5139 break;
5140
5141 case SHIFT_ASR_IMMEDIATE:
5142 if (shift != SHIFT_ASR)
5143 {
5144 inst.error = _("'ASR' required");
5145 return FAIL;
5146 }
5147 break;
5148
5149 default: abort ();
5150 }
5151
5152 if (shift != SHIFT_RRX)
5153 {
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p);
5156
5157 if (mode == NO_SHIFT_RESTRICT
5158 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5159 {
5160 inst.operands[i].imm = reg;
5161 inst.operands[i].immisreg = 1;
5162 }
5163 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5164 return FAIL;
5165 }
5166 inst.operands[i].shift_kind = shift;
5167 inst.operands[i].shifted = 1;
5168 *str = p;
5169 return SUCCESS;
5170 }
5171
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5173
5174 #<immediate>
5175 #<immediate>, <rotate>
5176 <Rm>
5177 <Rm>, <shift>
5178
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5182
5183 static int
5184 parse_shifter_operand (char **str, int i)
5185 {
5186 int value;
5187 expressionS exp;
5188
5189 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5190 {
5191 inst.operands[i].reg = value;
5192 inst.operands[i].isreg = 1;
5193
5194 /* parse_shift will override this if appropriate */
5195 inst.reloc.exp.X_op = O_constant;
5196 inst.reloc.exp.X_add_number = 0;
5197
5198 if (skip_past_comma (str) == FAIL)
5199 return SUCCESS;
5200
5201 /* Shift operation on register. */
5202 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5203 }
5204
5205 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5206 return FAIL;
5207
5208 if (skip_past_comma (str) == SUCCESS)
5209 {
5210 /* #x, y -- ie explicit rotation by Y. */
5211 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5212 return FAIL;
5213
5214 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5215 {
5216 inst.error = _("constant expression expected");
5217 return FAIL;
5218 }
5219
5220 value = exp.X_add_number;
5221 if (value < 0 || value > 30 || value % 2 != 0)
5222 {
5223 inst.error = _("invalid rotation");
5224 return FAIL;
5225 }
5226 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5227 {
5228 inst.error = _("invalid constant");
5229 return FAIL;
5230 }
5231
5232 /* Encode as specified. */
5233 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5234 return SUCCESS;
5235 }
5236
5237 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5238 inst.reloc.pc_rel = 0;
5239 return SUCCESS;
5240 }
5241
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source (without the colon).  */
  int alu_code;		/* BFD reloc for ADD/SUB; 0 means not allowed.  */
  int ldr_code;		/* BFD reloc for LDR; 0 means not allowed.  */
  int ldrs_code;	/* BFD reloc for LDRS; 0 means not allowed.  */
  int ldc_code;		/* BFD reloc for LDC; 0 means not allowed.  */
};

/* Selector for the non-ALU relocation code of a
   group_reloc_table_entry (see parse_address_main).  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5266
/* Table of recognized group relocations.  A zero code means the
   relocation is not permitted for that instruction class; the users
   of this table report an error when the selected code is zero.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5341
5342 /* Given the address of a pointer pointing to the textual name of a group
5343 relocation as may appear in assembler source, attempt to find its details
5344 in group_reloc_table. The pointer will be updated to the character after
5345 the trailing colon. On failure, FAIL will be returned; SUCCESS
5346 otherwise. On success, *entry will be updated to point at the relevant
5347 group_reloc_table entry. */
5348
5349 static int
5350 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5351 {
5352 unsigned int i;
5353 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5354 {
5355 int length = strlen (group_reloc_table[i].name);
5356
5357 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5358 && (*str)[length] == ':')
5359 {
5360 *out = &group_reloc_table[i];
5361 *str += (length + 1);
5362 return SUCCESS;
5363 }
5364 }
5365
5366 return FAIL;
5367 }
5368
5369 /* Parse a <shifter_operand> for an ARM data processing instruction
5370 (as for parse_shifter_operand) where group relocations are allowed:
5371
5372 #<immediate>
5373 #<immediate>, <rotate>
5374 #:<group_reloc>:<expression>
5375 <Rm>
5376 <Rm>, <shift>
5377
5378 where <group_reloc> is one of the strings defined in group_reloc_table.
5379 The hashes are optional.
5380
5381 Everything else is as for parse_shifter_operand. */
5382
5383 static parse_operand_result
5384 parse_shifter_operand_group_reloc (char **str, int i)
5385 {
5386 /* Determine if we have the sequence of characters #: or just :
5387 coming next. If we do, then we check for a group relocation.
5388 If we don't, punt the whole lot to parse_shifter_operand. */
5389
5390 if (((*str)[0] == '#' && (*str)[1] == ':')
5391 || (*str)[0] == ':')
5392 {
5393 struct group_reloc_table_entry *entry;
5394
5395 if ((*str)[0] == '#')
5396 (*str) += 2;
5397 else
5398 (*str)++;
5399
5400 /* Try to parse a group relocation. Anything else is an error. */
5401 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5402 {
5403 inst.error = _("unknown group relocation");
5404 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5405 }
5406
5407 /* We now have the group relocation table entry corresponding to
5408 the name in the assembler source. Next, we parse the expression. */
5409 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5410 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5411
5412 /* Record the relocation type (always the ALU variant here). */
5413 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5414 gas_assert (inst.reloc.type != 0);
5415
5416 return PARSE_OPERAND_SUCCESS;
5417 }
5418 else
5419 return parse_shifter_operand (str, i) == SUCCESS
5420 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5421
5422 /* Never reached. */
5423 }
5424
5425 /* Parse a Neon alignment expression. Information is written to
5426 inst.operands[i]. We assume the initial ':' has been skipped.
5427
5428 align .imm = align << 8, .immisalign=1, .preind=0 */
5429 static parse_operand_result
5430 parse_neon_alignment (char **str, int i)
5431 {
5432 char *p = *str;
5433 expressionS exp;
5434
5435 my_get_expression (&exp, &p, GE_NO_PREFIX);
5436
5437 if (exp.X_op != O_constant)
5438 {
5439 inst.error = _("alignment must be constant");
5440 return PARSE_OPERAND_FAIL;
5441 }
5442
5443 inst.operands[i].imm = exp.X_add_number << 8;
5444 inst.operands[i].immisalign = 1;
5445 /* Alignments are not pre-indexes. */
5446 inst.operands[i].preind = 0;
5447
5448 *str = p;
5449 return PARSE_OPERAND_SUCCESS;
5450 }
5451
5452 /* Parse all forms of an ARM address expression. Information is written
5453 to inst.operands[i] and/or inst.reloc.
5454
5455 Preindexed addressing (.preind=1):
5456
5457 [Rn, #offset] .reg=Rn .reloc.exp=offset
5458 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5459 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5460 .shift_kind=shift .reloc.exp=shift_imm
5461
5462 These three may have a trailing ! which causes .writeback to be set also.
5463
5464 Postindexed addressing (.postind=1, .writeback=1):
5465
5466 [Rn], #offset .reg=Rn .reloc.exp=offset
5467 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5468 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5469 .shift_kind=shift .reloc.exp=shift_imm
5470
5471 Unindexed addressing (.preind=0, .postind=0):
5472
5473 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5474
5475 Other:
5476
5477 [Rn]{!} shorthand for [Rn,#0]{!}
5478 =immediate .isreg=0 .reloc.exp=immediate
5479 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5480
5481 It is the caller's responsibility to check for addressing modes not
5482 supported by the instruction, and to set inst.reloc.type. */
5483
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either '=immediate' or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* '[' must be followed by a base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : pre-indexed addressing.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ... : register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Un-consume the '-' so it is parsed as part of the
		 following expression.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this relocation is not
		 available for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], ... : post-indexed addressing.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  /* Un-consume the '-' so it is parsed as part of the
		     following expression.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5735
5736 static int
5737 parse_address (char **str, int i)
5738 {
5739 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5740 ? SUCCESS : FAIL;
5741 }
5742
5743 static parse_operand_result
5744 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5745 {
5746 return parse_address_main (str, i, 1, type);
5747 }
5748
5749 /* Parse an operand for a MOVW or MOVT instruction. */
5750 static int
5751 parse_half (char **str)
5752 {
5753 char * p;
5754
5755 p = *str;
5756 skip_past_char (&p, '#');
5757 if (strncasecmp (p, ":lower16:", 9) == 0)
5758 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5759 else if (strncasecmp (p, ":upper16:", 9) == 0)
5760 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5761
5762 if (inst.reloc.type != BFD_RELOC_UNUSED)
5763 {
5764 p += 9;
5765 skip_whitespace (p);
5766 }
5767
5768 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5769 return FAIL;
5770
5771 if (inst.reloc.type == BFD_RELOC_UNUSED)
5772 {
5773 if (inst.reloc.exp.X_op != O_constant)
5774 {
5775 inst.error = _("constant expression expected");
5776 return FAIL;
5777 }
5778 if (inst.reloc.exp.X_add_number < 0
5779 || inst.reloc.exp.X_add_number > 0xffff)
5780 {
5781 inst.error = _("immediate value out of range");
5782 return FAIL;
5783 }
5784 }
5785 *str = p;
5786 return SUCCESS;
5787 }
5788
5789 /* Miscellaneous. */
5790
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (i.e. being
   written) and FALSE when it is the source of an MRS (being read); it
   only affects the implicit mask bits chosen for M-profile and bare-APSR
   forms.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      /* M-profile cores have no SPSR/CPSR, only the M-system registers.  */
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Scan the full register name (letters, digits, underscores).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *psr names, stop the lookup key at the 'r' of "psr" so
	 that any trailing suffix is parsed separately below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character "SPSR"/"CPSR"/"APSR" just matched.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each bit
	     letter may appear at most once: a repeat sets the 0x20 marker,
	     which is diagnosed as a bad bitmask below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q given (exactly once each) selects
	     the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits, a partial nzcvq set, or a repeated
	     'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Ordinary CPSR/SPSR suffix: look the field name up directly.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5987
5988 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5989 value suitable for splatting into the AIF field of the instruction. */
5990
5991 static int
5992 parse_cps_flags (char **str)
5993 {
5994 int val = 0;
5995 int saw_a_flag = 0;
5996 char *s = *str;
5997
5998 for (;;)
5999 switch (*s++)
6000 {
6001 case '\0': case ',':
6002 goto done;
6003
6004 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6005 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6006 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6007
6008 default:
6009 inst.error = _("unrecognized CPS flag");
6010 return FAIL;
6011 }
6012
6013 done:
6014 if (saw_a_flag == 0)
6015 {
6016 inst.error = _("missing CPS flags");
6017 return FAIL;
6018 }
6019
6020 *str = s - 1;
6021 return val;
6022 }
6023
6024 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6025 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6026
6027 static int
6028 parse_endian_specifier (char **str)
6029 {
6030 int little_endian;
6031 char *s = *str;
6032
6033 if (strncasecmp (s, "BE", 2))
6034 little_endian = 0;
6035 else if (strncasecmp (s, "LE", 2))
6036 little_endian = 1;
6037 else
6038 {
6039 inst.error = _("valid endian specifiers are be or le");
6040 return FAIL;
6041 }
6042
6043 if (ISALNUM (s[2]) || s[2] == '_')
6044 {
6045 inst.error = _("valid endian specifiers are be or le");
6046 return FAIL;
6047 }
6048
6049 *str = s + 2;
6050 return little_endian;
6051 }
6052
6053 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6054 value suitable for poking into the rotate field of an sxt or sxta
6055 instruction, or FAIL on error. */
6056
6057 static int
6058 parse_ror (char **str)
6059 {
6060 int rot;
6061 char *s = *str;
6062
6063 if (strncasecmp (s, "ROR", 3) == 0)
6064 s += 3;
6065 else
6066 {
6067 inst.error = _("missing rotation field after comma");
6068 return FAIL;
6069 }
6070
6071 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6072 return FAIL;
6073
6074 switch (rot)
6075 {
6076 case 0: *str = s; return 0x0;
6077 case 8: *str = s; return 0x1;
6078 case 16: *str = s; return 0x2;
6079 case 24: *str = s; return 0x3;
6080
6081 default:
6082 inst.error = _("rotation can only be 0, 8, 16, or 24");
6083 return FAIL;
6084 }
6085 }
6086
6087 /* Parse a conditional code (from conds[] below). The value returned is in the
6088 range 0 .. 14, or FAIL. */
6089 static int
6090 parse_cond (char **str)
6091 {
6092 char *q;
6093 const struct asm_cond *c;
6094 int n;
6095 /* Condition codes are always 2 characters, so matching up to
6096 3 characters is sufficient. */
6097 char cond[3];
6098
6099 q = *str;
6100 n = 0;
6101 while (ISALPHA (*q) && n < 3)
6102 {
6103 cond[n] = TOLOWER (*q);
6104 q++;
6105 n++;
6106 }
6107
6108 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6109 if (!c)
6110 {
6111 inst.error = _("condition required");
6112 return FAIL;
6113 }
6114
6115 *str = q;
6116 return c->value;
6117 }
6118
6119 /* Record a use of the given feature. */
6120 static void
6121 record_feature_use (const arm_feature_set *feature)
6122 {
6123 if (thumb_mode)
6124 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6125 else
6126 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6127 }
6128
6129 /* If the given feature available in the selected CPU, mark it as used.
6130 Returns TRUE iff feature is available. */
6131 static bfd_boolean
6132 mark_feature_used (const arm_feature_set *feature)
6133 {
6134 /* Ensure the option is valid on the current architecture. */
6135 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6136 return FALSE;
6137
6138 /* Add the appropriate architecture feature for the barrier option used.
6139 */
6140 record_feature_use (feature);
6141
6142 return TRUE;
6143 }
6144
6145 /* Parse an option for a barrier instruction. Returns the encoding for the
6146 option, or FAIL. */
6147 static int
6148 parse_barrier (char **str)
6149 {
6150 char *p, *q;
6151 const struct asm_barrier_opt *o;
6152
6153 p = q = *str;
6154 while (ISALPHA (*q))
6155 q++;
6156
6157 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6158 q - p);
6159 if (!o)
6160 return FAIL;
6161
6162 if (!mark_feature_used (&o->arch))
6163 return FAIL;
6164
6165 *str = q;
6166 return o->value;
6167 }
6168
6169 /* Parse the operands of a table branch instruction. Similar to a memory
6170 operand. */
6171 static int
6172 parse_tb (char **str)
6173 {
6174 char * p = *str;
6175 int reg;
6176
6177 if (skip_past_char (&p, '[') == FAIL)
6178 {
6179 inst.error = _("'[' expected");
6180 return FAIL;
6181 }
6182
6183 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6184 {
6185 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6186 return FAIL;
6187 }
6188 inst.operands[0].reg = reg;
6189
6190 if (skip_past_comma (&p) == FAIL)
6191 {
6192 inst.error = _("',' expected");
6193 return FAIL;
6194 }
6195
6196 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6197 {
6198 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6199 return FAIL;
6200 }
6201 inst.operands[0].imm = reg;
6202
6203 if (skip_past_comma (&p) == SUCCESS)
6204 {
6205 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6206 return FAIL;
6207 if (inst.reloc.exp.X_add_number != 1)
6208 {
6209 inst.error = _("invalid shift");
6210 return FAIL;
6211 }
6212 inst.operands[0].shifted = 1;
6213 }
6214
6215 if (skip_past_char (&p, ']') == FAIL)
6216 {
6217 inst.error = _("']' expected");
6218 return FAIL;
6219 }
6220 *str = p;
6221 return SUCCESS;
6222 }
6223
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The case numbers in the comments below refer to the numbered operand
   combinations documented at do_neon_mov.  I always indexes the next
   operand slot to be filled; it is post-incremented each time an operand
   is completed and another follows.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A quad register cannot be transferred to/from cores.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-register pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6446
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the low
   16 bits and the Thumb code the high 16 bits; parse_operands picks the
   half appropriate to the instruction set being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands treats any code >= OP_FIRST_OPTIONAL as optional and
     records a backtrack point before matching it; this must therefore
     remain the first of the OP_o* codes above.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6582
6583 /* Generic instruction operand parser. This does no encoding and no
6584 semantic validation; it merely squirrels values away in the inst
6585 structure. Returns SUCCESS or FAIL depending on whether the
6586 specified grammar matched. */
6587 static int
6588 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6589 {
6590 unsigned const int *upat = pattern;
6591 char *backtrack_pos = 0;
6592 const char *backtrack_error = 0;
6593 int i, val = 0, backtrack_index = 0;
6594 enum arm_reg_type rtype;
6595 parse_operand_result result;
6596 unsigned int op_parse_code;
6597
6598 #define po_char_or_fail(chr) \
6599 do \
6600 { \
6601 if (skip_past_char (&str, chr) == FAIL) \
6602 goto bad_args; \
6603 } \
6604 while (0)
6605
6606 #define po_reg_or_fail(regtype) \
6607 do \
6608 { \
6609 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6610 & inst.operands[i].vectype); \
6611 if (val == FAIL) \
6612 { \
6613 first_error (_(reg_expected_msgs[regtype])); \
6614 goto failure; \
6615 } \
6616 inst.operands[i].reg = val; \
6617 inst.operands[i].isreg = 1; \
6618 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6619 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6620 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6621 || rtype == REG_TYPE_VFD \
6622 || rtype == REG_TYPE_NQ); \
6623 } \
6624 while (0)
6625
6626 #define po_reg_or_goto(regtype, label) \
6627 do \
6628 { \
6629 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6630 & inst.operands[i].vectype); \
6631 if (val == FAIL) \
6632 goto label; \
6633 \
6634 inst.operands[i].reg = val; \
6635 inst.operands[i].isreg = 1; \
6636 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6637 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6638 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6639 || rtype == REG_TYPE_VFD \
6640 || rtype == REG_TYPE_NQ); \
6641 } \
6642 while (0)
6643
6644 #define po_imm_or_fail(min, max, popt) \
6645 do \
6646 { \
6647 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6648 goto failure; \
6649 inst.operands[i].imm = val; \
6650 } \
6651 while (0)
6652
6653 #define po_scalar_or_goto(elsz, label) \
6654 do \
6655 { \
6656 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6657 if (val == FAIL) \
6658 goto label; \
6659 inst.operands[i].reg = val; \
6660 inst.operands[i].isscalar = 1; \
6661 } \
6662 while (0)
6663
6664 #define po_misc_or_fail(expr) \
6665 do \
6666 { \
6667 if (expr) \
6668 goto failure; \
6669 } \
6670 while (0)
6671
6672 #define po_misc_or_fail_no_backtrack(expr) \
6673 do \
6674 { \
6675 result = expr; \
6676 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6677 backtrack_pos = 0; \
6678 if (result != PARSE_OPERAND_SUCCESS) \
6679 goto failure; \
6680 } \
6681 while (0)
6682
6683 #define po_barrier_or_imm(str) \
6684 do \
6685 { \
6686 val = parse_barrier (&str); \
6687 if (val == FAIL && ! ISALPHA (*str)) \
6688 goto immediate; \
6689 if (val == FAIL \
6690 /* ISB can only take SY as an option. */ \
6691 || ((inst.instruction & 0xf0) == 0x60 \
6692 && val != 0xf)) \
6693 { \
6694 inst.error = _("invalid barrier type"); \
6695 backtrack_pos = 0; \
6696 goto failure; \
6697 } \
6698 } \
6699 while (0)
6700
6701 skip_whitespace (str);
6702
6703 for (i = 0; upat[i] != OP_stop; i++)
6704 {
6705 op_parse_code = upat[i];
6706 if (op_parse_code >= 1<<16)
6707 op_parse_code = thumb ? (op_parse_code >> 16)
6708 : (op_parse_code & ((1<<16)-1));
6709
6710 if (op_parse_code >= OP_FIRST_OPTIONAL)
6711 {
6712 /* Remember where we are in case we need to backtrack. */
6713 gas_assert (!backtrack_pos);
6714 backtrack_pos = str;
6715 backtrack_error = inst.error;
6716 backtrack_index = i;
6717 }
6718
6719 if (i > 0 && (i > 1 || inst.operands[0].present))
6720 po_char_or_fail (',');
6721
6722 switch (op_parse_code)
6723 {
6724 /* Registers */
6725 case OP_oRRnpc:
6726 case OP_oRRnpcsp:
6727 case OP_RRnpc:
6728 case OP_RRnpcsp:
6729 case OP_oRR:
6730 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6731 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6732 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6733 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6734 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6735 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6736 case OP_oRND:
6737 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6738 case OP_RVC:
6739 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6740 break;
6741 /* Also accept generic coprocessor regs for unknown registers. */
6742 coproc_reg:
6743 po_reg_or_fail (REG_TYPE_CN);
6744 break;
6745 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6746 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6747 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6748 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6749 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6750 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6751 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6752 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6753 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6754 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6755 case OP_oRNQ:
6756 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6757 case OP_oRNDQ:
6758 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6759 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6760 case OP_oRNSDQ:
6761 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6762
6763 /* Neon scalar. Using an element size of 8 means that some invalid
6764 scalars are accepted here, so deal with those in later code. */
6765 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6766
6767 case OP_RNDQ_I0:
6768 {
6769 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6770 break;
6771 try_imm0:
6772 po_imm_or_fail (0, 0, TRUE);
6773 }
6774 break;
6775
6776 case OP_RVSD_I0:
6777 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6778 break;
6779
6780 case OP_RSVD_FI0:
6781 {
6782 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6783 break;
6784 try_ifimm0:
6785 if (parse_ifimm_zero (&str))
6786 inst.operands[i].imm = 0;
6787 else
6788 {
6789 inst.error
6790 = _("only floating point zero is allowed as immediate value");
6791 goto failure;
6792 }
6793 }
6794 break;
6795
6796 case OP_RR_RNSC:
6797 {
6798 po_scalar_or_goto (8, try_rr);
6799 break;
6800 try_rr:
6801 po_reg_or_fail (REG_TYPE_RN);
6802 }
6803 break;
6804
6805 case OP_RNSDQ_RNSC:
6806 {
6807 po_scalar_or_goto (8, try_nsdq);
6808 break;
6809 try_nsdq:
6810 po_reg_or_fail (REG_TYPE_NSDQ);
6811 }
6812 break;
6813
6814 case OP_RNDQ_RNSC:
6815 {
6816 po_scalar_or_goto (8, try_ndq);
6817 break;
6818 try_ndq:
6819 po_reg_or_fail (REG_TYPE_NDQ);
6820 }
6821 break;
6822
6823 case OP_RND_RNSC:
6824 {
6825 po_scalar_or_goto (8, try_vfd);
6826 break;
6827 try_vfd:
6828 po_reg_or_fail (REG_TYPE_VFD);
6829 }
6830 break;
6831
6832 case OP_VMOV:
6833 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6834 not careful then bad things might happen. */
6835 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6836 break;
6837
6838 case OP_RNDQ_Ibig:
6839 {
6840 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6841 break;
6842 try_immbig:
6843 /* There's a possibility of getting a 64-bit immediate here, so
6844 we need special handling. */
6845 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6846 == FAIL)
6847 {
6848 inst.error = _("immediate value is out of range");
6849 goto failure;
6850 }
6851 }
6852 break;
6853
6854 case OP_RNDQ_I63b:
6855 {
6856 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6857 break;
6858 try_shimm:
6859 po_imm_or_fail (0, 63, TRUE);
6860 }
6861 break;
6862
6863 case OP_RRnpcb:
6864 po_char_or_fail ('[');
6865 po_reg_or_fail (REG_TYPE_RN);
6866 po_char_or_fail (']');
6867 break;
6868
6869 case OP_RRnpctw:
6870 case OP_RRw:
6871 case OP_oRRw:
6872 po_reg_or_fail (REG_TYPE_RN);
6873 if (skip_past_char (&str, '!') == SUCCESS)
6874 inst.operands[i].writeback = 1;
6875 break;
6876
6877 /* Immediates */
6878 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6879 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6880 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6881 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6882 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6883 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6884 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6885 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6886 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6887 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6888 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6889 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6890
6891 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6892 case OP_oI7b:
6893 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6894 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6895 case OP_oI31b:
6896 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6897 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6898 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6899 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6900
6901 /* Immediate variants */
6902 case OP_oI255c:
6903 po_char_or_fail ('{');
6904 po_imm_or_fail (0, 255, TRUE);
6905 po_char_or_fail ('}');
6906 break;
6907
6908 case OP_I31w:
6909 /* The expression parser chokes on a trailing !, so we have
6910 to find it first and zap it. */
6911 {
6912 char *s = str;
6913 while (*s && *s != ',')
6914 s++;
6915 if (s[-1] == '!')
6916 {
6917 s[-1] = '\0';
6918 inst.operands[i].writeback = 1;
6919 }
6920 po_imm_or_fail (0, 31, TRUE);
6921 if (str == s - 1)
6922 str = s;
6923 }
6924 break;
6925
6926 /* Expressions */
6927 case OP_EXPi: EXPi:
6928 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6929 GE_OPT_PREFIX));
6930 break;
6931
6932 case OP_EXP:
6933 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6934 GE_NO_PREFIX));
6935 break;
6936
6937 case OP_EXPr: EXPr:
6938 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6939 GE_NO_PREFIX));
6940 if (inst.reloc.exp.X_op == O_symbol)
6941 {
6942 val = parse_reloc (&str);
6943 if (val == -1)
6944 {
6945 inst.error = _("unrecognized relocation suffix");
6946 goto failure;
6947 }
6948 else if (val != BFD_RELOC_UNUSED)
6949 {
6950 inst.operands[i].imm = val;
6951 inst.operands[i].hasreloc = 1;
6952 }
6953 }
6954 break;
6955
6956 /* Operand for MOVW or MOVT. */
6957 case OP_HALF:
6958 po_misc_or_fail (parse_half (&str));
6959 break;
6960
6961 /* Register or expression. */
6962 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6963 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6964
6965 /* Register or immediate. */
6966 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6967 I0: po_imm_or_fail (0, 0, FALSE); break;
6968
6969 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6970 IF:
6971 if (!is_immediate_prefix (*str))
6972 goto bad_args;
6973 str++;
6974 val = parse_fpa_immediate (&str);
6975 if (val == FAIL)
6976 goto failure;
6977 /* FPA immediates are encoded as registers 8-15.
6978 parse_fpa_immediate has already applied the offset. */
6979 inst.operands[i].reg = val;
6980 inst.operands[i].isreg = 1;
6981 break;
6982
6983 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6984 I32z: po_imm_or_fail (0, 32, FALSE); break;
6985
6986 /* Two kinds of register. */
6987 case OP_RIWR_RIWC:
6988 {
6989 struct reg_entry *rege = arm_reg_parse_multi (&str);
6990 if (!rege
6991 || (rege->type != REG_TYPE_MMXWR
6992 && rege->type != REG_TYPE_MMXWC
6993 && rege->type != REG_TYPE_MMXWCG))
6994 {
6995 inst.error = _("iWMMXt data or control register expected");
6996 goto failure;
6997 }
6998 inst.operands[i].reg = rege->number;
6999 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7000 }
7001 break;
7002
7003 case OP_RIWC_RIWG:
7004 {
7005 struct reg_entry *rege = arm_reg_parse_multi (&str);
7006 if (!rege
7007 || (rege->type != REG_TYPE_MMXWC
7008 && rege->type != REG_TYPE_MMXWCG))
7009 {
7010 inst.error = _("iWMMXt control register expected");
7011 goto failure;
7012 }
7013 inst.operands[i].reg = rege->number;
7014 inst.operands[i].isreg = 1;
7015 }
7016 break;
7017
7018 /* Misc */
7019 case OP_CPSF: val = parse_cps_flags (&str); break;
7020 case OP_ENDI: val = parse_endian_specifier (&str); break;
7021 case OP_oROR: val = parse_ror (&str); break;
7022 case OP_COND: val = parse_cond (&str); break;
7023 case OP_oBARRIER_I15:
7024 po_barrier_or_imm (str); break;
7025 immediate:
7026 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7027 goto failure;
7028 break;
7029
7030 case OP_wPSR:
7031 case OP_rPSR:
7032 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7033 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7034 {
7035 inst.error = _("Banked registers are not available with this "
7036 "architecture.");
7037 goto failure;
7038 }
7039 break;
7040 try_psr:
7041 val = parse_psr (&str, op_parse_code == OP_wPSR);
7042 break;
7043
7044 case OP_APSR_RR:
7045 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7046 break;
7047 try_apsr:
7048 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7049 instruction). */
7050 if (strncasecmp (str, "APSR_", 5) == 0)
7051 {
7052 unsigned found = 0;
7053 str += 5;
7054 while (found < 15)
7055 switch (*str++)
7056 {
7057 case 'c': found = (found & 1) ? 16 : found | 1; break;
7058 case 'n': found = (found & 2) ? 16 : found | 2; break;
7059 case 'z': found = (found & 4) ? 16 : found | 4; break;
7060 case 'v': found = (found & 8) ? 16 : found | 8; break;
7061 default: found = 16;
7062 }
7063 if (found != 15)
7064 goto failure;
7065 inst.operands[i].isvec = 1;
7066 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7067 inst.operands[i].reg = REG_PC;
7068 }
7069 else
7070 goto failure;
7071 break;
7072
7073 case OP_TB:
7074 po_misc_or_fail (parse_tb (&str));
7075 break;
7076
7077 /* Register lists. */
7078 case OP_REGLST:
7079 val = parse_reg_list (&str);
7080 if (*str == '^')
7081 {
7082 inst.operands[i].writeback = 1;
7083 str++;
7084 }
7085 break;
7086
7087 case OP_VRSLST:
7088 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7089 break;
7090
7091 case OP_VRDLST:
7092 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7093 break;
7094
7095 case OP_VRSDLST:
7096 /* Allow Q registers too. */
7097 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7098 REGLIST_NEON_D);
7099 if (val == FAIL)
7100 {
7101 inst.error = NULL;
7102 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7103 REGLIST_VFP_S);
7104 inst.operands[i].issingle = 1;
7105 }
7106 break;
7107
7108 case OP_NRDLST:
7109 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7110 REGLIST_NEON_D);
7111 break;
7112
7113 case OP_NSTRLST:
7114 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7115 &inst.operands[i].vectype);
7116 break;
7117
7118 /* Addressing modes */
7119 case OP_ADDR:
7120 po_misc_or_fail (parse_address (&str, i));
7121 break;
7122
7123 case OP_ADDRGLDR:
7124 po_misc_or_fail_no_backtrack (
7125 parse_address_group_reloc (&str, i, GROUP_LDR));
7126 break;
7127
7128 case OP_ADDRGLDRS:
7129 po_misc_or_fail_no_backtrack (
7130 parse_address_group_reloc (&str, i, GROUP_LDRS));
7131 break;
7132
7133 case OP_ADDRGLDC:
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str, i, GROUP_LDC));
7136 break;
7137
7138 case OP_SH:
7139 po_misc_or_fail (parse_shifter_operand (&str, i));
7140 break;
7141
7142 case OP_SHG:
7143 po_misc_or_fail_no_backtrack (
7144 parse_shifter_operand_group_reloc (&str, i));
7145 break;
7146
7147 case OP_oSHll:
7148 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7149 break;
7150
7151 case OP_oSHar:
7152 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7153 break;
7154
7155 case OP_oSHllar:
7156 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7157 break;
7158
7159 default:
7160 as_fatal (_("unhandled operand code %d"), op_parse_code);
7161 }
7162
7163 /* Various value-based sanity checks and shared operations. We
7164 do not signal immediate failures for the register constraints;
7165 this allows a syntax error to take precedence. */
7166 switch (op_parse_code)
7167 {
7168 case OP_oRRnpc:
7169 case OP_RRnpc:
7170 case OP_RRnpcb:
7171 case OP_RRw:
7172 case OP_oRRw:
7173 case OP_RRnpc_I0:
7174 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7175 inst.error = BAD_PC;
7176 break;
7177
7178 case OP_oRRnpcsp:
7179 case OP_RRnpcsp:
7180 if (inst.operands[i].isreg)
7181 {
7182 if (inst.operands[i].reg == REG_PC)
7183 inst.error = BAD_PC;
7184 else if (inst.operands[i].reg == REG_SP)
7185 inst.error = BAD_SP;
7186 }
7187 break;
7188
7189 case OP_RRnpctw:
7190 if (inst.operands[i].isreg
7191 && inst.operands[i].reg == REG_PC
7192 && (inst.operands[i].writeback || thumb))
7193 inst.error = BAD_PC;
7194 break;
7195
7196 case OP_CPSF:
7197 case OP_ENDI:
7198 case OP_oROR:
7199 case OP_wPSR:
7200 case OP_rPSR:
7201 case OP_COND:
7202 case OP_oBARRIER_I15:
7203 case OP_REGLST:
7204 case OP_VRSLST:
7205 case OP_VRDLST:
7206 case OP_VRSDLST:
7207 case OP_NRDLST:
7208 case OP_NSTRLST:
7209 if (val == FAIL)
7210 goto failure;
7211 inst.operands[i].imm = val;
7212 break;
7213
7214 default:
7215 break;
7216 }
7217
7218 /* If we get here, this operand was successfully parsed. */
7219 inst.operands[i].present = 1;
7220 continue;
7221
7222 bad_args:
7223 inst.error = BAD_ARGS;
7224
7225 failure:
7226 if (!backtrack_pos)
7227 {
7228 /* The parse routine should already have set inst.error, but set a
7229 default here just in case. */
7230 if (!inst.error)
7231 inst.error = _("syntax error");
7232 return FAIL;
7233 }
7234
7235 /* Do not backtrack over a trailing optional argument that
7236 absorbed some text. We will only fail again, with the
7237 'garbage following instruction' error message, which is
7238 probably less helpful than the current one. */
7239 if (backtrack_index == i && backtrack_pos != str
7240 && upat[i+1] == OP_stop)
7241 {
7242 if (!inst.error)
7243 inst.error = _("syntax error");
7244 return FAIL;
7245 }
7246
7247 /* Try again, skipping the optional argument at backtrack_pos. */
7248 str = backtrack_pos;
7249 inst.error = backtrack_error;
7250 inst.operands[backtrack_index].present = 0;
7251 i = backtrack_index;
7252 backtrack_pos = 0;
7253 }
7254
7255 /* Check that we have parsed all the arguments. */
7256 if (*str != '\0' && !inst.error)
7257 inst.error = _("garbage following instruction");
7258
7259 return inst.error ? FAIL : SUCCESS;
7260 }
7261
7262 #undef po_char_or_fail
7263 #undef po_reg_or_fail
7264 #undef po_reg_or_goto
7265 #undef po_imm_or_fail
7266 #undef po_scalar_or_fail
7267 #undef po_barrier_or_imm
7268
/* Shorthand macro for instruction encoding functions issuing errors.
   NOTE: when EXPR is true this expands to a `return' from the
   ENCLOSING function, so it may only be used inside functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7280
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   NOTE: like `constraint', this returns from the enclosing function;
   REG is evaluated up to three times, so pass a simple expression.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7292
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only active when deprecation warnings are enabled
   (warn_on_deprecated); REG is evaluated twice, so pass a simple
   expression.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7300
7301 /* Functions for operand encoding. ARM, then Thumb. */
7302
/* Rotate the 32-bit value V left by N bits (N taken modulo 32).
   Arguments and the whole expansion are parenthesized so the macro is
   safe to use with arbitrary expressions: the previous form expanded
   `32 - n' without parentheses, which mis-associates when N is itself
   an additive expression (e.g. rotate_left (x, a + b)).  Masking the
   shift counts with 31 also avoids an undefined shift by 32 when N is
   a multiple of 32.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7304
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution is architecturally UNPREDICTABLE for these
     instructions: warn, but still emit the encoding.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Hard error if the selected CPU/FPU lacks the fp16 extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor-number field (bits 8-11) with 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7324
7325 /* If VAL can be encoded in the immediate field of an ARM instruction,
7326 return the encoded form. Otherwise, return FAIL. */
7327
7328 static unsigned int
7329 encode_arm_immediate (unsigned int val)
7330 {
7331 unsigned int a, i;
7332
7333 if (val <= 0xff)
7334 return val;
7335
7336 for (i = 2; i < 32; i += 2)
7337 if ((a = rotate_left (val, i)) <= 0xff)
7338 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7339
7340 return FAIL;
7341 }
7342
7343 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7344 return the encoded form. Otherwise, return FAIL. */
7345 static unsigned int
7346 encode_thumb32_immediate (unsigned int val)
7347 {
7348 unsigned int a, i;
7349
7350 if (val <= 0xff)
7351 return val;
7352
7353 for (i = 1; i <= 24; i++)
7354 {
7355 a = val >> i;
7356 if ((val & ~(0xff << i)) == 0)
7357 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7358 }
7359
7360 a = val & 0xff;
7361 if (val == ((a << 16) | a))
7362 return 0x100 | a;
7363 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7364 return 0x300 | a;
7365
7366 a = val & 0xff00;
7367 if (val == ((a << 16) | a))
7368 return 0x200 | (a >> 8);
7369
7370 return FAIL;
7371 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS selects which operand slot (Sd/Sn/Sm or Dd/Dn/Dm) the register
   occupies; each slot splits the register number across two fields.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers 16-31 only exist with the VFPv3 D32 extension; check
     availability and record the feature use for object attributes.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split as [4:1]+[0]; D registers split as [3:0]+[4].
     The bit positions of the two fields depend on the operand slot.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7426
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int index;
      /* Use of r15 anywhere in a register-shifted register operand is
	 UNPREDICTABLE; warn for each occurrence, including earlier
	 register operands of the same instruction.  */
      for (index = 0; index <= i; ++index)
	{
	  gas_assert (inst.operands[index].present);
	  if (inst.operands[index].isreg && inst.operands[index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded as ROR with a zero shift amount.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  /* Shift-amount register goes in bits 8-11.  */
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount is filled in later by md_apply_fix.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
7461
/* Encode the shifter operand (operand I) of an ARM data-processing
   instruction: either a register with an optional shift, or an
   immediate whose final encoding is deferred to md_apply_fix.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      /* A pending BFD_RELOC_ARM_IMMEDIATE means md_apply_fix will
	 encode the immediate; otherwise insert it directly.  */
      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
7477
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P/W (pre-index/write-back) bits
   common to both addressing modes.  IS_T selects the user-mode
   (ldrt/strt-style) forms, which only accept post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* T-form instructions repurpose the W bit to select user-mode
	 access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Rn == Rd/Rt with write-back (explicit, or implied by post-index)
     is unpredictable; warn.  Compares the Rn field (bits 16-19) with
     the Rd/Rt field (bits 12-15).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7520
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, +/-Rm {, <shift> #imm}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with a zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* The shift amount itself is encoded by md_apply_fix.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7580
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled (shifted) register-offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7624
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  /* e f g h -> bits 3:0.  */
  inst.instruction |= immbits & 0xf;
  /* b c d -> bits 18:16.  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* a -> bit 28 in Thumb encodings, bit 24 in ARM encodings.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
7639
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is left untouched.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;

  /* Pick the mask covering the low word's share of SIZE bits; the
     high word only participates when SIZE is 64.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;

  if (xhi && size == 64)
    *xhi = (~*xhi) & 0xffffffff;
}
7676
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7688
/* For immediate of above form, return 0bABCD, i.e. gather bit 0 of
   each byte of IMM into a 4-bit value (byte 0 -> bit 0, etc).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7697
/* Compress quarter-float representation to 0b...000 abcdefgh:
   the sign bit (bit 31 -> bit 7) plus the low exponent bit and the
   top six fraction bits (bits 25-19 -> bits 6-0).  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned exp_and_frac = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | exp_and_frac;
}
7705
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   represented at any element size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float constant: only valid for 32-bit elements
     and MOV (not MVN).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where every byte is 0x00 or 0xff: cmode 0xe
	 with op forced to 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only representable when both
	 halves are equal; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One byte of the 32-bit element set (cmodes 0x0-0x6), or a byte
	 with trailing ones (cmodes 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a repeated 16-bit pattern if both halves match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One byte of the 16-bit element set (cmodes 0x8/0xa).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as a repeated 8-bit pattern if both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7815
7816 #if defined BFD_HOST_64_BIT
7817 /* Returns TRUE if double precision value V may be cast
7818 to single precision without loss of accuracy. */
7819
7820 static bfd_boolean
7821 is_double_a_single (bfd_int64_t v)
7822 {
7823 int exp = (int)((v >> 52) & 0x7FF);
7824 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7825
7826 return (exp == 0 || exp == 0x7FF
7827 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7828 && (mantissa & 0x1FFFFFFFl) == 0;
7829 }
7830
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).
   Callers are expected to have validated V with is_double_a_single
   first; the overflow/underflow branches below only matter for
   unchecked inputs.  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Inf/NaN keep the all-ones exponent.  */
  if (exp == 0x7FF)
    exp = 0xFF;
  else
    {
      /* Rebias from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  /* NOTE(review): 0x7F looks like it should be 0xFF for a true
	     single-precision infinity, but inputs accepted by
	     is_double_a_single never reach this branch - confirm before
	     relying on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Drop the 29 low fraction bits not representable in single
     precision.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7862 #endif /* BFD_HOST_64_BIT */
7863
/* Class of constant loaded by an "=expr" pseudo-operation; used by
   move_or_literal_pool to pick an encoding strategy.  */
enum lit_type
{
  CONST_THUMB,	/* Destination is a core register, Thumb encoding.  */
  CONST_ARM,	/* Destination is a core register, ARM encoding.  */
  CONST_VEC	/* Destination is a VFP/Neon register.  */
};
7870
static void do_vfp_nsyn_opcode (const char *);

/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Identify the load bit for the current encoding; the pseudo-op is
     only meaningful for load instructions.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      /* Extract the constant's value into V, assembling it from
	 littlenums when the expression parser produced a bignum.  */
      if (inst.reloc.exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value directly, then its complement (for
		     MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      /* Try ARM MOV, then MVN with the complemented value.  */
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN immediate encoding, inverting the
		 constant if the direct form fails.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This means that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form worked: fall back to a PC-relative literal-pool load,
     rewriting operand 1 as a [pc, #offset] address.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8121
8122 /* inst.operands[i] was set up by parse_address. Encode it into an
8123 ARM-format instruction. Reject all forms which cannot be encoded
8124 into a coprocessor load/store instruction. If wb_ok is false,
8125 reject use of writeback; if unind_ok is false, reject use of
8126 unindexed addressing. If reloc_override is not 0, use it instead
8127 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8128 (in which case it is preserved). */
8129
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a bare immediate (literal-pool reference) is only
	 accepted here when the destination is a vector register.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register goes in the Rn field (bits 16-19).  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* For the unindexed form the 8-bit field holds the option value
	 rather than an offset immediate.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* A group relocation (ALU_PC_G0_NC .. LDC_SB_G2) or LDR_PC_G0 chosen
     by the parser is preserved; otherwise select the generic
     coprocessor-offset relocation for the current instruction set.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8198
8199 /* Functions for instruction encoding, sorted by sub-architecture.
8200 First some generics; their names are taken from the conventional
8201 bit positions for register arguments in ARM format instructions. */
8202
static void
do_noargs (void)
{
  /* Encoder for mnemonics that take no operands: the opcode value from
     the insns[] table is already complete, so nothing to add.  */
}
8207
8208 static void
8209 do_rd (void)
8210 {
8211 inst.instruction |= inst.operands[0].reg << 12;
8212 }
8213
8214 static void
8215 do_rn (void)
8216 {
8217 inst.instruction |= inst.operands[0].reg << 16;
8218 }
8219
8220 static void
8221 do_rd_rm (void)
8222 {
8223 inst.instruction |= inst.operands[0].reg << 12;
8224 inst.instruction |= inst.operands[1].reg;
8225 }
8226
8227 static void
8228 do_rm_rn (void)
8229 {
8230 inst.instruction |= inst.operands[0].reg;
8231 inst.instruction |= inst.operands[1].reg << 16;
8232 }
8233
8234 static void
8235 do_rd_rn (void)
8236 {
8237 inst.instruction |= inst.operands[0].reg << 12;
8238 inst.instruction |= inst.operands[1].reg << 16;
8239 }
8240
8241 static void
8242 do_rn_rd (void)
8243 {
8244 inst.instruction |= inst.operands[0].reg << 16;
8245 inst.instruction |= inst.operands[1].reg << 12;
8246 }
8247
8248 static void
8249 do_tt (void)
8250 {
8251 inst.instruction |= inst.operands[0].reg << 8;
8252 inst.instruction |= inst.operands[1].reg << 16;
8253 }
8254
8255 static bfd_boolean
8256 check_obsolete (const arm_feature_set *feature, const char *msg)
8257 {
8258 if (ARM_CPU_IS_ANY (cpu_variant))
8259 {
8260 as_tsktsk ("%s", msg);
8261 return TRUE;
8262 }
8263 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8264 {
8265 as_bad ("%s", msg);
8266 return TRUE;
8267 }
8268
8269 return FALSE;
8270 }
8271
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Operand 0 in bits 12-15, operand 1 in bits 0-3, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8295
8296 static void
8297 do_rd_rn_rm (void)
8298 {
8299 inst.instruction |= inst.operands[0].reg << 12;
8300 inst.instruction |= inst.operands[1].reg << 16;
8301 inst.instruction |= inst.operands[2].reg;
8302 }
8303
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* The address operand must be a plain [Rn]: any parsed offset must
     be an explicit zero (O_illegal means no offset expression was
     present at all).  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Operand 0 in bits 0-3, operand 1 in bits 12-15, operand 2 in
     bits 16-19.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8316
8317 static void
8318 do_imm0 (void)
8319 {
8320 inst.instruction |= inst.operands[0].imm;
8321 }
8322
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 12-15, then encode the coprocessor address operand;
     both writeback and unindexed forms are permitted here.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8329
8330 /* ARM instructions, in alphabetical order by function name (except
8331 that wrapper functions appear immediately after the function they
8332 wrap). */
8333
8334 /* This is a pseudo-op of the form "adr rd, label" to be converted
8335 into a relative address of the form "add rd, pc, #label-.-8". */
8336
8337 static void
8338 do_adr (void)
8339 {
8340 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8341
8342 /* Frag hacking will turn this into a sub instruction if the offset turns
8343 out to be negative. */
8344 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8345 inst.reloc.pc_rel = 1;
8346 inst.reloc.exp.X_add_number -= 8;
8347 }
8348
8349 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8350 into a relative address of the form:
8351 add rd, pc, #low(label-.-8)"
8352 add rd, rd, #high(label-.-8)" */
8353
8354 static void
8355 do_adrl (void)
8356 {
8357 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8358
8359 /* Frag hacking will turn this into a sub instruction if the offset turns
8360 out to be negative. */
8361 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8362 inst.reloc.pc_rel = 1;
8363 inst.size = INSN_SIZE * 2;
8364 inst.reloc.exp.X_add_number -= 8;
8365 }
8366
8367 static void
8368 do_arit (void)
8369 {
8370 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8371 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8372 THUMB1_RELOC_ONLY);
8373 if (!inst.operands[1].present)
8374 inst.operands[1].reg = inst.operands[0].reg;
8375 inst.instruction |= inst.operands[0].reg << 12;
8376 inst.instruction |= inst.operands[1].reg << 16;
8377 encode_arm_shifter_operand (2);
8378 }
8379
8380 static void
8381 do_barrier (void)
8382 {
8383 if (inst.operands[0].present)
8384 inst.instruction |= inst.operands[0].imm;
8385 else
8386 inst.instruction |= 0xf;
8387 }
8388
8389 static void
8390 do_bfc (void)
8391 {
8392 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8393 constraint (msb > 32, _("bit-field extends past end of register"));
8394 /* The instruction encoding stores the LSB and MSB,
8395 not the LSB and width. */
8396 inst.instruction |= inst.operands[0].reg << 12;
8397 inst.instruction |= inst.operands[1].imm << 7;
8398 inst.instruction |= (msb - 1) << 16;
8399 }
8400
8401 static void
8402 do_bfi (void)
8403 {
8404 unsigned int msb;
8405
8406 /* #0 in second position is alternative syntax for bfc, which is
8407 the same instruction but with REG_PC in the Rm field. */
8408 if (!inst.operands[1].isreg)
8409 inst.operands[1].reg = REG_PC;
8410
8411 msb = inst.operands[2].imm + inst.operands[3].imm;
8412 constraint (msb > 32, _("bit-field extends past end of register"));
8413 /* The instruction encoding stores the LSB and MSB,
8414 not the LSB and width. */
8415 inst.instruction |= inst.operands[0].reg << 12;
8416 inst.instruction |= inst.operands[1].reg;
8417 inst.instruction |= inst.operands[2].imm << 7;
8418 inst.instruction |= (msb - 1) << 16;
8419 }
8420
8421 static void
8422 do_bfx (void)
8423 {
8424 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8425 _("bit-field extends past end of register"));
8426 inst.instruction |= inst.operands[0].reg << 12;
8427 inst.instruction |= inst.operands[1].reg;
8428 inst.instruction |= inst.operands[2].imm << 7;
8429 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8430 }
8431
8432 /* ARM V5 breakpoint instruction (argument parse)
8433 BKPT <16 bit unsigned immediate>
8434 Instruction is not conditional.
8435 The bit pattern given in insns[] has the COND_ALWAYS condition,
8436 and it is an error if the caller tried to override that. */
8437
8438 static void
8439 do_bkpt (void)
8440 {
8441 /* Top 12 of 16 bits to bits 19:8. */
8442 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8443
8444 /* Bottom 4 of 16 bits to bits 3:0. */
8445 inst.instruction |= inst.operands[0].imm & 0xf;
8446 }
8447
static void
encode_branch (int default_reloc)
{
  /* Set up the PC-relative branch relocation.  An explicit "(plt)" or
     "(tlscall)" suffix on the operand overrides DEFAULT_RELOC.  */
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8464
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later objects use the PCREL_JUMP relocation; older
     objects (and non-ELF) use the plain PCREL_BRANCH relocation.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8475
static void
do_bl (void)
{
#ifdef OBJ_ELF
  /* For EABI v4 and later, an unconditional BL gets the PCREL_CALL
     relocation while a conditional one gets PCREL_JUMP; older objects
     (and non-ELF) always use PCREL_BRANCH.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8491
8492 /* ARM V5 branch-link-exchange instruction (argument parse)
8493 BLX <target_addr> ie BLX(1)
8494 BLX{<condition>} <Rm> ie BLX(2)
8495 Unfortunately, there are two different opcodes for this mnemonic.
8496 So, the insns[].value is not used, and the code here zaps values
8497 into inst.instruction.
8498 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8499
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) has its own fixed opcode pattern; the insns[] value is
	 for the register form and must be replaced wholesale.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8523
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  /* Only EABI v4+ objects get the V4BX relocation; for anything else
     (including non-ELF) suppress it.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8547
8548
8549 /* ARM v5TEJ. Jump to Jazelle code. */
8550
8551 static void
8552 do_bxj (void)
8553 {
8554 if (inst.operands[0].reg == REG_PC)
8555 as_tsktsk (_("use of r15 in bxj is not really useful"));
8556
8557 inst.instruction |= inst.operands[0].reg;
8558 }
8559
8560 /* Co-processor data operation:
8561 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8562 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8563 static void
8564 do_cdp (void)
8565 {
8566 inst.instruction |= inst.operands[0].reg << 8;
8567 inst.instruction |= inst.operands[1].imm << 20;
8568 inst.instruction |= inst.operands[2].reg << 12;
8569 inst.instruction |= inst.operands[3].reg << 16;
8570 inst.instruction |= inst.operands[4].reg;
8571 inst.instruction |= inst.operands[5].imm << 5;
8572 }
8573
8574 static void
8575 do_cmp (void)
8576 {
8577 inst.instruction |= inst.operands[0].reg << 16;
8578 encode_arm_shifter_operand (1);
8579 }
8580
8581 /* Transfer between coprocessor and ARM registers.
8582 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8583 MRC2
8584 MCR{cond}
8585 MCR2
8586
8587 No special properties. */
8588
/* Describes one coprocessor register access that is deprecated and/or
   obsoleted, identified by its full (cp, opc1, crn, crm, opc2) tuple.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;
  int opc1;
  unsigned crn;
  unsigned crm;
  int opc2;
  arm_feature_set deprecated;	/* Architectures where use is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where use is obsolete.  */
  const char *dep_msg;		/* Diagnostic for deprecated use.  */
  const char *obs_msg;		/* Diagnostic for obsolete use (or NULL).  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8629
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* The fixed opcode values distinguish the to-coprocessor forms
	 from the from-coprocessor forms.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the named (cp, opc1, crn, crm, opc2) tuple matches a
     register access deprecated in the selected architecture.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8679
8680 /* Transfer between coprocessor register and pair of ARM registers.
8681 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8682 MCRR2
8683 MRRC{cond}
8684 MRRC2
8685
8686 Two XScale instructions are special cases of these:
8687
8688 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8689 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8690
8691 Result unpredictable if Rd or Rn is R15. */
8692
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Coproc in bits 8-11, opcode in bits 4-7, Rd in bits 12-15,
     Rn in bits 16-19, CRm in bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8726
8727 static void
8728 do_cpsi (void)
8729 {
8730 inst.instruction |= inst.operands[0].imm << 6;
8731 if (inst.operands[1].present)
8732 {
8733 inst.instruction |= CPSI_MMOD;
8734 inst.instruction |= inst.operands[1].imm;
8735 }
8736 }
8737
8738 static void
8739 do_dbg (void)
8740 {
8741 inst.instruction |= inst.operands[0].imm;
8742 }
8743
8744 static void
8745 do_div (void)
8746 {
8747 unsigned Rd, Rn, Rm;
8748
8749 Rd = inst.operands[0].reg;
8750 Rn = (inst.operands[1].present
8751 ? inst.operands[1].reg : Rd);
8752 Rm = inst.operands[2].reg;
8753
8754 constraint ((Rd == REG_PC), BAD_PC);
8755 constraint ((Rn == REG_PC), BAD_PC);
8756 constraint ((Rm == REG_PC), BAD_PC);
8757
8758 inst.instruction |= Rd << 16;
8759 inst.instruction |= Rn << 0;
8760 inst.instruction |= Rm << 8;
8761 }
8762
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing: size 0.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the mask (low nibble plus a marker bit) and base
	 condition for validating the instructions that follow.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8779
8780 /* If there is only one register in the register list,
8781 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  /* A register list containing exactly one register is a power of two;
     its bit index is the register number and must be a valid ARM
     register (0-15).  Otherwise return -1.  */
  int reg = ffs (range) - 1;

  if (reg > 15 || range != (1 << reg))
    return -1;
  return reg;
}
8788
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register-list bitmask.  */
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* NOTE(review): the writeback flag on the register-list operand is
     presumably overloaded to mean the '^' suffix -- confirm against
     the operand parser.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field, then substitute the
	 single-register opcode with the register in bits 12-15.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8839
8840 static void
8841 do_ldmstm (void)
8842 {
8843 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8844 }
8845
8846 /* ARMv5TE load-consecutive (argument parse)
8847 Mode is like LDRH.
8848
8849 LDRccD R, mode
8850 STRccD R, mode. */
8851
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* Second transfer register defaults to first + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  /* V4_STR_BIT clear means this is the load (LDRccD) variant.  */
  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8887
static void
do_ldrex (void)
{
  /* LDREX only accepts a plain [Rn] address: no offset, index,
     writeback, shift or post-indexing.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The zero offset was consumed above; no fix-up is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8919
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* Rt in bits 12-15; operand 2 is the base register, in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8935
8936 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8937 which is not a multiple of four is UNPREDICTABLE. */
8938 static void
8939 check_ldr_r15_aligned (void)
8940 {
8941 constraint (!(inst.operands[1].immisreg)
8942 && (inst.operands[0].reg == REG_PC
8943 && inst.operands[1].reg == REG_PC
8944 && (inst.reloc.exp.X_add_number & 0x3)),
8945 _("ldr to register 15 must be 4-byte alligned"));
8946 }
8947
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr Rd, =expr" (no register address): try to rewrite as a move or
     fall back to a literal-pool load; if handled, we are done.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8958
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A bare [Rn] parses as pre-indexed; only an explicit zero
	 offset may be silently rewritten to the post-indexed form.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8977
8978 /* Halfword and signed-byte load/store operations. */
8979
static void
do_ldstv4 (void)
{
  /* Halfword/signed-byte transfers may not target the PC.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* "=expr" pseudo-operand: try a move or literal-pool load using
     addressing mode 3; if handled, we are done.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8990
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A bare [Rn] parses as pre-indexed; only an explicit zero
	 offset may be silently rewritten to the post-indexed form.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9009
9010 /* Co-processor register load/store.
9011 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9012 static void
9013 do_lstc (void)
9014 {
9015 inst.instruction |= inst.operands[0].reg << 8;
9016 inst.instruction |= inst.operands[1].reg << 12;
9017 encode_arm_cp_address (2, TRUE, TRUE, 0);
9018 }
9019
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Operands placed at bits 16-19, 0-3, 8-11 and 12-15 respectively.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9034
static void
do_mov (void)
{
  /* Thumb-1-only group relocations may not be used with the ARM MOV
     encoding.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9044
9045 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* Only fold the immediate now when no relocation is pending;
     otherwise the fix-up machinery fills it in later.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9066
static int
do_vfp_nsyn_mrs (void)
{
  /* Handle the VFP-specific forms of mrs.  Returns SUCCESS if the
     instruction was fully handled here, FAIL to fall through to the
     generic mrs encoding.  */
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9085
9086 static int
9087 do_vfp_nsyn_msr (void)
9088 {
9089 if (inst.operands[0].isvec)
9090 do_vfp_nsyn_opcode ("fmxr");
9091 else
9092 return FAIL;
9093
9094 return SUCCESS;
9095 }
9096
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid transfer register in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9120
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* In Thumb state both SP and PC are rejected; in ARM state only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9139
/* Encode ARM-mode MRS, including the banked-register form.  Rd goes into
   bits 12-15; the source is either a banked register (operand 1 parsed as
   a register) or APSR/CPSR/SPSR (operand 1 parsed as a PSR mask).  */
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-syntax forms (FMSTAT/FMRX) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): the masks differ in width (0xf0000 vs 0xf000), so
	 the second comparison can never be equal and the test reduces to
	 checking bit 9 only — looks suspicious; confirm against the
	 banked-register encoding before changing.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* R-bit (SPSR vs CPSR) plus the fixed 0xf mask field.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9168
9169 /* Two possible forms:
9170 "{C|S}PSR_<field>, Rm",
9171 "{C|S}PSR_f, #expression". */
9172
/* Encode ARM-mode MSR.  Operand 0 holds the PSR field mask (already in
   instruction-bit positions); operand 1 is either Rm (register form) or
   an immediate, which is left for the relocation machinery to encode as
   an ARM modified immediate.  */
static void
do_msr (void)
{
  /* VFP-syntax form (FMXR) is handled separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9189
/* Encode MUL/MLA-style "Rd, Rm {, Rs}": Rd -> bits 16-19, Rm -> bits 0-3,
   Rs -> bits 8-11.  A missing Rs defaults to Rd.  Warns when Rd == Rm on
   pre-v6 cores, where the result is unpredictable.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9205
9206 /* Long Multiply Parser
9207 UMULL RdLo, RdHi, Rm, Rs
9208 SMULL RdLo, RdHi, Rm, Rs
9209 UMLAL RdLo, RdHi, Rm, Rs
9210 SMLAL RdLo, RdHi, Rm, Rs. */
9211
/* Encode long multiplies (UMULL/SMULL/UMLAL/SMLAL): RdLo -> bits 12-15,
   RdHi -> bits 16-19, Rm -> bits 0-3, Rs -> bits 8-11.  Warns about the
   register combinations that are unpredictable (always for RdHi == RdLo,
   and pre-v6 also when Rm overlaps either destination).  */
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9230
/* Encode NOP.  On v6K-and-later cores (or when a hint operand is given)
   emit the architectural hint encoding — an MSR to CPSR with no fields
   selected — otherwise leave the legacy encoding untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9244
9245 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9246 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9247 Condition defaults to COND_ALWAYS.
9248 Error if Rd, Rn or Rm are R15. */
9249
9250 static void
9251 do_pkhbt (void)
9252 {
9253 inst.instruction |= inst.operands[0].reg << 12;
9254 inst.instruction |= inst.operands[1].reg << 16;
9255 inst.instruction |= inst.operands[2].reg;
9256 if (inst.operands[3].present)
9257 encode_arm_shift (3);
9258 }
9259
9260 /* ARM V6 PKHTB (Argument Parse). */
9261
/* Encode PKHTB.  With no shift specifier the instruction is rewritten as
   the equivalent PKHBT with Rn and Rm swapped; otherwise Rd/Rn/Rm are
   encoded normally and the (ASR) shift appended.  */
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn. */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9282
9283 /* ARMv5TE: Preload-Cache
9284 MP Extensions: Preload for write
9285
9286 PLD(W) <addr_mode>
9287
9288 Syntactically, like LDR with B=1, W=0, L=1. */
9289
/* Encode PLD/PLDW.  Only a plain pre-indexed address is legal: no
   post-index, no writeback, no unindexed form.  The address itself is
   encoded via the LDR-style addressing-mode-2 helper.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9303
9304 /* ARMv7: PLI <addr_mode> */
/* Encode PLI.  Same addressing restrictions and encoding as PLD, except
   that the P (pre-index) bit is cleared afterwards, as the PLI encoding
   requires.  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9319
/* Encode PUSH/POP by rewriting the operands into the equivalent
   LDM/STM form: the register list becomes operand 1 and a synthetic
   writeback SP base becomes operand 0, then the LDM/STM encoder runs.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9332
9333 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9334 word at the specified address and the following word
9335 respectively.
9336 Unconditionally executed.
9337 Error if Rn is R15. */
9338
9339 static void
9340 do_rfe (void)
9341 {
9342 inst.instruction |= inst.operands[0].reg << 16;
9343 if (inst.operands[0].writeback)
9344 inst.instruction |= WRITE_BACK;
9345 }
9346
9347 /* ARM V6 ssat (argument parse). */
9348
9349 static void
9350 do_ssat (void)
9351 {
9352 inst.instruction |= inst.operands[0].reg << 12;
9353 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9354 inst.instruction |= inst.operands[2].reg;
9355
9356 if (inst.operands[3].present)
9357 encode_arm_shift (3);
9358 }
9359
9360 /* ARM V6 usat (argument parse). */
9361
9362 static void
9363 do_usat (void)
9364 {
9365 inst.instruction |= inst.operands[0].reg << 12;
9366 inst.instruction |= inst.operands[1].imm << 16;
9367 inst.instruction |= inst.operands[2].reg;
9368
9369 if (inst.operands[3].present)
9370 encode_arm_shift (3);
9371 }
9372
9373 /* ARM V6 ssat16 (argument parse). */
9374
9375 static void
9376 do_ssat16 (void)
9377 {
9378 inst.instruction |= inst.operands[0].reg << 12;
9379 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9380 inst.instruction |= inst.operands[2].reg;
9381 }
9382
9383 static void
9384 do_usat16 (void)
9385 {
9386 inst.instruction |= inst.operands[0].reg << 12;
9387 inst.instruction |= inst.operands[1].imm << 16;
9388 inst.instruction |= inst.operands[2].reg;
9389 }
9390
9391 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9392 preserving the other bits.
9393
9394 setend <endian_specifier>, where <endian_specifier> is either
9395 BE or LE. */
9396
/* Encode SETEND.  Operand 0 is non-zero for BE; bit 9 selects the
   endianness.  Deprecated from ARMv8 onwards, so warn there.  */
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9407
/* Encode the shift pseudo-ops (LSL/LSR/ASR/ROR "Rd, {Rm,} Rs|#imm").
   A missing Rm defaults to Rd.  Register-shift form sets Rs in bits 8-11
   plus the shift-by-register bit; immediate form defers the amount to a
   BFD_RELOC_ARM_SHIFT_IMM fixup.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9428
/* Encode SMC; the immediate is emitted as a BFD_RELOC_ARM_SMC fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9435
/* Encode HVC; the immediate is emitted as a BFD_RELOC_ARM_HVC fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9442
/* Encode SWI/SVC; the immediate is emitted as a BFD_RELOC_ARM_SWI fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9449
/* Encode ARM-mode SETPAN; the single immediate bit goes into bit 9.
   Requires the PAN extension.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9458
/* Encode Thumb-mode SETPAN; the immediate goes into bit 3.
   Requires the PAN extension.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9467
9468 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9469 SMLAxy{cond} Rd,Rm,Rs,Rn
9470 SMLAWy{cond} Rd,Rm,Rs,Rn
9471 Error if any register is R15. */
9472
9473 static void
9474 do_smla (void)
9475 {
9476 inst.instruction |= inst.operands[0].reg << 16;
9477 inst.instruction |= inst.operands[1].reg;
9478 inst.instruction |= inst.operands[2].reg << 8;
9479 inst.instruction |= inst.operands[3].reg << 12;
9480 }
9481
9482 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9483 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9484 Error if any register is R15.
9485 Warning if Rdlo == Rdhi. */
9486
9487 static void
9488 do_smlal (void)
9489 {
9490 inst.instruction |= inst.operands[0].reg << 12;
9491 inst.instruction |= inst.operands[1].reg << 16;
9492 inst.instruction |= inst.operands[2].reg;
9493 inst.instruction |= inst.operands[3].reg << 8;
9494
9495 if (inst.operands[0].reg == inst.operands[1].reg)
9496 as_tsktsk (_("rdhi and rdlo must be different"));
9497 }
9498
9499 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9500 SMULxy{cond} Rd,Rm,Rs
9501 Error if any register is R15. */
9502
9503 static void
9504 do_smul (void)
9505 {
9506 inst.instruction |= inst.operands[0].reg << 16;
9507 inst.instruction |= inst.operands[1].reg;
9508 inst.instruction |= inst.operands[2].reg << 8;
9509 }
9510
9511 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9512 the same for both ARM and Thumb-2. */
9513
/* Encode SRS.  The base register, if written at all, must be SP (and
   defaults to SP when omitted); the mode number goes in the low bits
   and writeback on either operand sets the W bit.  */
static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9532
9533 /* ARM V6 strex (argument parse). */
9534
/* Encode ARM-mode STREX: Rd -> bits 12-15, Rt -> bits 0-3, Rn -> bits
   16-19.  Only a plain [Rn] address with zero offset is legal, and the
   status register Rd must not overlap Rt or Rn.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* Offset already validated as zero; discard the parsed expression.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9558
/* Encode Thumb STREXB/STREXH.  Only a plain register address is legal
   and the status register must not overlap the other operands; the
   register fields themselves are laid down by do_rm_rd_rn.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9573
/* Encode ARM-mode STREXD: Rd -> bits 12-15, Rt (even) -> bits 0-3,
   Rn -> bits 16-19.  The stored pair must be an even/odd consecutive
   pair and the status register must not overlap the pair or the base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9595
/* ARM V8 STLEX (store-release exclusive).  */
/* Encode ARM-mode STLEX (ARMv8 store-release exclusive): check that the
   status register does not overlap the source or base, then lay down the
   register fields via do_rd_rm_rn.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9605
/* Thumb counterpart of do_stlex; same overlap check, Thumb field order.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9614
9615 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9616 extends it to 32-bits, and adds the result to a value in another
9617 register. You can specify a rotation by 0, 8, 16, or 24 bits
9618 before extracting the 16-bit value.
9619 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9620 Condition defaults to COND_ALWAYS.
9621 Error if any register uses R15. */
9622
9623 static void
9624 do_sxtah (void)
9625 {
9626 inst.instruction |= inst.operands[0].reg << 12;
9627 inst.instruction |= inst.operands[1].reg << 16;
9628 inst.instruction |= inst.operands[2].reg;
9629 inst.instruction |= inst.operands[3].imm << 10;
9630 }
9631
9632 /* ARM V6 SXTH.
9633
9634 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9635 Condition defaults to COND_ALWAYS.
9636 Error if any register uses R15. */
9637
9638 static void
9639 do_sxth (void)
9640 {
9641 inst.instruction |= inst.operands[0].reg << 12;
9642 inst.instruction |= inst.operands[1].reg;
9643 inst.instruction |= inst.operands[2].imm << 10;
9644 }
9645 \f
9646 /* VFP instructions. In a logical order: SP variant first, monad
9647 before dyad, arithmetic then move then load/store. */
9648
9649 static void
9650 do_vfp_sp_monadic (void)
9651 {
9652 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9653 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9654 }
9655
9656 static void
9657 do_vfp_sp_dyadic (void)
9658 {
9659 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9660 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9661 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9662 }
9663
/* Single-precision compare against zero: only the Sd field is used.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9669
9670 static void
9671 do_vfp_dp_sp_cvt (void)
9672 {
9673 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9674 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9675 }
9676
9677 static void
9678 do_vfp_sp_dp_cvt (void)
9679 {
9680 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9681 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9682 }
9683
9684 static void
9685 do_vfp_reg_from_sp (void)
9686 {
9687 inst.instruction |= inst.operands[0].reg << 12;
9688 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9689 }
9690
/* FMRRS-style move of an SP register pair to two core registers.
   Operand 2 must name exactly two consecutive S registers; the core
   registers go to bits 12-15 and 16-19, the VFP pair into Sm.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9700
9701 static void
9702 do_vfp_sp_from_reg (void)
9703 {
9704 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9705 inst.instruction |= inst.operands[1].reg << 12;
9706 }
9707
/* FMSRR-style move of two core registers into an SP register pair.
   Operand 0 must name exactly two consecutive S registers (encoded in
   Sm); the core registers go to bits 12-15 and 16-19.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9717
9718 static void
9719 do_vfp_sp_ldst (void)
9720 {
9721 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9722 encode_arm_cp_address (1, FALSE, TRUE, 0);
9723 }
9724
9725 static void
9726 do_vfp_dp_ldst (void)
9727 {
9728 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9729 encode_arm_cp_address (1, FALSE, TRUE, 0);
9730 }
9731
9732
/* Common encoder for single-precision load/store multiple.  Sets the W
   bit on writeback (mandatory unless the mode is IA), the base register
   in bits 16-19, the first transfer register in Sd, and the register
   count in the low bits.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9745
/* Common encoder for double-precision load/store multiple.  Like
   vfp_sp_ldstm, but the transfer length is counted in words (two per D
   register), with one extra word for the FLDMX/FSTMX variants.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9766
/* FLDMIAS/FSTMIAS wrapper.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9772
/* FLDMDBS/FSTMDBS wrapper.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9778
/* FLDMIAD/FSTMIAD wrapper.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9784
/* FLDMDBD/FSTMDBD wrapper.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9790
/* FLDMIAX/FSTMIAX (extended/unknown-precision) wrapper.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9796
/* FLDMDBX/FSTMDBX (extended/unknown-precision) wrapper.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9802
9803 static void
9804 do_vfp_dp_rd_rm (void)
9805 {
9806 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9807 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9808 }
9809
9810 static void
9811 do_vfp_dp_rn_rd (void)
9812 {
9813 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9814 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9815 }
9816
9817 static void
9818 do_vfp_dp_rd_rn (void)
9819 {
9820 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9821 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9822 }
9823
9824 static void
9825 do_vfp_dp_rd_rn_rm (void)
9826 {
9827 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9828 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9829 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9830 }
9831
/* Double-precision op using only the Dd field.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9837
9838 static void
9839 do_vfp_dp_rm_rd_rn (void)
9840 {
9841 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9842 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9843 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9844 }
9845
9846 /* VFPv3 instructions. */
9847 static void
9848 do_vfp_sp_const (void)
9849 {
9850 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9851 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9852 inst.instruction |= (inst.operands[1].imm & 0x0f);
9853 }
9854
9855 static void
9856 do_vfp_dp_const (void)
9857 {
9858 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9859 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9860 inst.instruction |= (inst.operands[1].imm & 0x0f);
9861 }
9862
/* Common encoder for VFPv3 fixed-point conversions.  The instruction
   stores SRCSIZE - fbits; after range-checking, the low bit of that
   value goes in bit 5 and the remaining bits in bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9886
/* 16-bit fixed-point conversion, single-precision destination.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9893
/* 16-bit fixed-point conversion, double-precision destination.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9900
/* 32-bit fixed-point conversion, single-precision destination.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9907
/* 32-bit fixed-point conversion, double-precision destination.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9914 \f
9915 /* FPA instructions. Also in a logical order. */
9916
9917 static void
9918 do_fpa_cmp (void)
9919 {
9920 inst.instruction |= inst.operands[0].reg << 16;
9921 inst.instruction |= inst.operands[1].reg;
9922 }
9923
/* Encode FPA LFM/SFM.  The register count (1-4) is packed into the
   CP_T_X/CP_T_Y bits; the "ea"/"fd" stack forms are emulated by
   synthesizing the appropriate offset and index direction, since the
   hardware has no real stacking modes.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer occupies 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9962 \f
9963 /* iWMMXt instructions: strictly in alphabetical order. */
9964
/* TANDC/TORC/TEXTRC condition-flag ops: the only legal destination is
   r15 (the flags), which is implicit in the base opcode.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9970
9971 static void
9972 do_iwmmxt_textrc (void)
9973 {
9974 inst.instruction |= inst.operands[0].reg << 12;
9975 inst.instruction |= inst.operands[1].imm;
9976 }
9977
9978 static void
9979 do_iwmmxt_textrm (void)
9980 {
9981 inst.instruction |= inst.operands[0].reg << 12;
9982 inst.instruction |= inst.operands[1].reg << 16;
9983 inst.instruction |= inst.operands[2].imm;
9984 }
9985
9986 static void
9987 do_iwmmxt_tinsr (void)
9988 {
9989 inst.instruction |= inst.operands[0].reg << 16;
9990 inst.instruction |= inst.operands[1].reg << 12;
9991 inst.instruction |= inst.operands[2].imm;
9992 }
9993
9994 static void
9995 do_iwmmxt_tmia (void)
9996 {
9997 inst.instruction |= inst.operands[0].reg << 5;
9998 inst.instruction |= inst.operands[1].reg;
9999 inst.instruction |= inst.operands[2].reg << 12;
10000 }
10001
10002 static void
10003 do_iwmmxt_waligni (void)
10004 {
10005 inst.instruction |= inst.operands[0].reg << 12;
10006 inst.instruction |= inst.operands[1].reg << 16;
10007 inst.instruction |= inst.operands[2].reg;
10008 inst.instruction |= inst.operands[3].imm << 20;
10009 }
10010
10011 static void
10012 do_iwmmxt_wmerge (void)
10013 {
10014 inst.instruction |= inst.operands[0].reg << 12;
10015 inst.instruction |= inst.operands[1].reg << 16;
10016 inst.instruction |= inst.operands[2].reg;
10017 inst.instruction |= inst.operands[3].imm << 21;
10018 }
10019
10020 static void
10021 do_iwmmxt_wmov (void)
10022 {
10023 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10024 inst.instruction |= inst.operands[0].reg << 12;
10025 inst.instruction |= inst.operands[1].reg << 16;
10026 inst.instruction |= inst.operands[1].reg;
10027 }
10028
10029 static void
10030 do_iwmmxt_wldstbh (void)
10031 {
10032 int reloc;
10033 inst.instruction |= inst.operands[0].reg << 12;
10034 if (thumb_mode)
10035 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10036 else
10037 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10038 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10039 }
10040
/* WLDRW/WSTRW.  When the destination is a control register (operand 0
   parsed with .isreg clear) the instruction must be unconditional and
   gets the 0xf condition field.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10054
/* WLDRD/WSTRD.  On iWMMXt2 a register-offset address selects the
   alternative encoding, which is built by hand here (clearing the old
   addressing bits, forcing the 0xf condition field and packing the
   P/U/W bits, base, offset and index registers directly); otherwise
   the normal coprocessor address encoder is used.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10077
10078 static void
10079 do_iwmmxt_wshufh (void)
10080 {
10081 inst.instruction |= inst.operands[0].reg << 12;
10082 inst.instruction |= inst.operands[1].reg << 16;
10083 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10084 inst.instruction |= (inst.operands[2].imm & 0x0f);
10085 }
10086
10087 static void
10088 do_iwmmxt_wzero (void)
10089 {
10090 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10091 inst.instruction |= inst.operands[0].reg;
10092 inst.instruction |= inst.operands[0].reg << 12;
10093 inst.instruction |= inst.operands[0].reg << 16;
10094 }
10095
/* iWMMXt shift/rotate instructions that take either a register or (on
   iWMMXt2 only) a 5-bit immediate third operand.  A zero immediate is
   not encodable directly, so it is rewritten as the equivalent full-
   width rotate (wrorh #16, wrorw #32) or, for doubleword forms, as
   "wor wrd, wrn, wrn".  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10145 \f
10146 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10147 operations first, then control, shift, and load/store. */
10148
10149 /* Insns like "foo X,Y,Z". */
10150
10151 static void
10152 do_mav_triple (void)
10153 {
10154 inst.instruction |= inst.operands[0].reg << 16;
10155 inst.instruction |= inst.operands[1].reg;
10156 inst.instruction |= inst.operands[2].reg << 12;
10157 }
10158
10159 /* Insns like "foo W,X,Y,Z".
10160 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10161
10162 static void
10163 do_mav_quad (void)
10164 {
10165 inst.instruction |= inst.operands[0].reg << 5;
10166 inst.instruction |= inst.operands[1].reg << 12;
10167 inst.instruction |= inst.operands[2].reg << 16;
10168 inst.instruction |= inst.operands[3].reg;
10169 }
10170
10171 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
/* cfmvsc32: DSPSC is implicit in the opcode; only the MVDX source
   register (bits 12-15) is encoded.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
10177
10178 /* Maverick shift immediate instructions.
10179 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10180 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10181
10182 static void
10183 do_mav_shift (void)
10184 {
10185 int imm = inst.operands[2].imm;
10186
10187 inst.instruction |= inst.operands[0].reg << 12;
10188 inst.instruction |= inst.operands[1].reg << 16;
10189
10190 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10191 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10192 Bit 4 should be 0. */
10193 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10194
10195 inst.instruction |= imm;
10196 }
10197 \f
10198 /* XScale instructions. Also sorted arithmetic before move. */
10199
10200 /* Xscale multiply-accumulate (argument parse)
10201 MIAcc acc0,Rm,Rs
10202 MIAPHcc acc0,Rm,Rs
10203 MIAxycc acc0,Rm,Rs. */
10204
10205 static void
10206 do_xsc_mia (void)
10207 {
10208 inst.instruction |= inst.operands[1].reg;
10209 inst.instruction |= inst.operands[2].reg << 12;
10210 }
10211
10212 /* Xscale move-accumulator-register (argument parse)
10213
10214 MARcc acc0,RdLo,RdHi. */
10215
10216 static void
10217 do_xsc_mar (void)
10218 {
10219 inst.instruction |= inst.operands[1].reg << 12;
10220 inst.instruction |= inst.operands[2].reg << 16;
10221 }
10222
10223 /* Xscale move-register-accumulator (argument parse)
10224
10225 MRAcc RdLo,RdHi,acc0. */
10226
10227 static void
10228 do_xsc_mra (void)
10229 {
10230 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10231 inst.instruction |= inst.operands[0].reg << 12;
10232 inst.instruction |= inst.operands[1].reg << 16;
10233 }
10234 \f
10235 /* Encoding functions relevant only to Thumb. */
10236
10237 /* inst.operands[i] is a shifted-register operand; encode
10238 it into inst.instruction in the format used by Thumb32. */
10239
/* inst.operands[i] is a shifted-register operand; encode it into
   inst.instruction in the Thumb32 format: Rm in bits 0-3, shift type
   in bits 4-5, and the 5-bit amount split across bits 12-14 (imm3)
   and 6-7 (imm2).  RRX is encoded as ROR #0; a shift of 32 (legal
   for LSR/ASR) is encoded as amount 0.  */
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10271
10272
10273 /* inst.operands[i] was set up by parse_address. Encode it into a
10274 Thumb32 format load or store instruction. Reject forms that cannot
10275 be used with such instructions. If is_t is true, reject forms that
10276 cannot be used with a T instruction; if is_d is true, reject forms
10277 that cannot be used with a D instruction. If it is a store insn,
10278 reject PC in Rn. */
10279
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Literal-pool (=N) operands must already have been converted.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always sits in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #imm}] register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* The LSL amount for a register offset is limited to 0-3.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No immediate offset, so no fixup is needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm] or [Rn, #imm]! pre-indexed immediate form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only permitted for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: set bit 24, and bit 21 for writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single-register form: set bits 10-11, and bit 8 for
	     writeback.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* The offset immediate is resolved by a fixup.  */
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm post-indexed form -- always implies writeback.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10351
10352 /* Table of Thumb instructions which exist in both 16- and 32-bit
10353 encodings (the latter only in post-V6T2 cores). The index is the
10354 value used in the insns table below. When there is more than one
10355 possible 16-bit encoding for the instruction, this table always
10356 holds variant (1).
10357 Also contains several pseudo-instructions used during relaxation. */
/* Each X() row gives: mnemonic tag, 16-bit (variant 1) opcode,
   32-bit opcode.  An all-ones 32-bit entry marks a mnemonic with no
   wide encoding.  The same table is expanded three times below to
   build the T_MNEM enum and the two opcode arrays in lock step.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcodes, indexed by T_MNEM code.  Bit 20 of the wide encoding
   is the S (set-flags) bit for the data-processing entries in this
   table, hence the THUMB_SETS_FLAGS test below.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10457
10458 /* Thumb instruction encoders, in alphabetical order. */
10459
10460 /* ADDW or SUBW. */
10461
10462 static void
10463 do_t_add_sub_w (void)
10464 {
10465 int Rd, Rn;
10466
10467 Rd = inst.operands[0].reg;
10468 Rn = inst.operands[1].reg;
10469
10470 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10471 is the SP-{plus,minus}-immediate form of the instruction. */
10472 if (Rn == REG_SP)
10473 constraint (Rd == REG_PC, BAD_PC);
10474 else
10475 reject_bad_reg (Rd);
10476
10477 inst.instruction |= (Rn << 16) | (Rd << 8);
10478 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10479 }
10480
10481 /* Parse an add or subtract instruction. We get here with inst.instruction
10482 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10483
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  /* Writing PC ends any enclosing IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* In an IT block only the non-flag-setting form may be narrow;
	 outside one only the flag-setting form may be.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 ALU_ABS group relocs must stay on the
		     16-bit encoding and must not be relaxed.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		  {
		    if (inst.size_req == 2)
		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      /* Record the candidate so relaxation can widen
			 the insn later if the immediate demands it.  */
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  /* Fall through to a wide encoding when one was demanded or no
	     narrow candidate was found.  */
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     may write PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD Rd, Rn where Rd == one source: make Rn the
			 "other" operand for the hi-register encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10699
/* Thumb ADR: load a PC-relative address into Rd, choosing between the
   16-bit encoding, the 32-bit encoding, and deferred relaxation.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
10734
10735 /* Arithmetic instructions for which there is just one 16-bit
10736 instruction encoding, and it allows only two low registers.
10737 For maximal compatibility with ARM syntax, we allow three register
10738 operands even when Thumb-32 instructions are not available, as long
10739 as the first two are identical. For instance, both "sbc r0,r1" and
10740 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  /* A missing second operand means Rd doubles as the first source.  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     flag-setting form may only be narrow outside an IT block,
	     the non-flag-setting form only inside one.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit encoding has no distinct Rd field, so it is
	     only usable when the destination equals source1.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10823
10824 /* Similarly, but for instructions where the arithmetic operation is
10825 commutative, so we can allow either of them to be different from
10826 the destination operand in a 16-bit instruction. For instance, all
10827 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10828 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  /* A missing second operand means Rd doubles as the first source.  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Commutativity lets the narrow encoding be used when the
	     destination matches either source operand.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10924
10925 static void
10926 do_t_bfc (void)
10927 {
10928 unsigned Rd;
10929 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10930 constraint (msb > 32, _("bit-field extends past end of register"));
10931 /* The instruction encoding stores the LSB and MSB,
10932 not the LSB and width. */
10933 Rd = inst.operands[0].reg;
10934 reject_bad_reg (Rd);
10935 inst.instruction |= Rd << 8;
10936 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10937 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10938 inst.instruction |= msb - 1;
10939 }
10940
static void
do_t_bfi (void)
{
  int Rd, Rn;
  unsigned int msb;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    Rn = REG_PC;
  else
    {
      Rn = inst.operands[1].reg;
      reject_bad_reg (Rn);
    }

  /* operands[2] is the LSB, operands[3] the width.  */
  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  The LSB is split into imm3/imm2 fields.  */
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
10970
10971 static void
10972 do_t_bfx (void)
10973 {
10974 unsigned Rd, Rn;
10975
10976 Rd = inst.operands[0].reg;
10977 Rn = inst.operands[1].reg;
10978
10979 reject_bad_reg (Rd);
10980 reject_bad_reg (Rn);
10981
10982 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10983 _("bit-field extends past end of register"));
10984 inst.instruction |= Rd << 8;
10985 inst.instruction |= Rn << 16;
10986 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10987 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10988 inst.instruction |= inst.operands[3].imm - 1;
10989 }
10990
10991 /* ARM V5 Thumb BLX (argument parse)
10992 BLX <target_addr> which is BLX(1)
10993 BLX <Rm> which is BLX(2)
10994 Unfortunately, there are two different opcodes for this mnemonic.
10995 So, the insns[].value is not used, and the code here zaps values
10996 into inst.instruction.
10997
10998 ??? How to take advantage of the additional two bits of displacement
10999 available in Thumb32 mode? Need new relocation? */
11000
11001 static void
11002 do_t_blx (void)
11003 {
11004 set_it_insn_type_last ();
11005
11006 if (inst.operands[0].isreg)
11007 {
11008 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11009 /* We have a register, so this is BLX(2). */
11010 inst.instruction |= inst.operands[0].reg << 3;
11011 }
11012 else
11013 {
11014 /* No register. This must be BLX(1). */
11015 inst.instruction = 0xf000e800;
11016 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11017 }
11018 }
11019
11020 static void
11021 do_t_branch (void)
11022 {
11023 int opcode;
11024 int cond;
11025 bfd_reloc_code_real_type reloc;
11026
11027 cond = inst.cond;
11028 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11029
11030 if (in_it_block ())
11031 {
11032 /* Conditional branches inside IT blocks are encoded as unconditional
11033 branches. */
11034 cond = COND_ALWAYS;
11035 }
11036 else
11037 cond = inst.cond;
11038
11039 if (cond != COND_ALWAYS)
11040 opcode = T_MNEM_bcond;
11041 else
11042 opcode = inst.instruction;
11043
11044 if (unified_syntax
11045 && (inst.size_req == 4
11046 || (inst.size_req != 2
11047 && (inst.operands[0].hasreloc
11048 || inst.reloc.exp.X_op == O_constant))))
11049 {
11050 inst.instruction = THUMB_OP32(opcode);
11051 if (cond == COND_ALWAYS)
11052 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11053 else
11054 {
11055 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11056 _("selected architecture does not support "
11057 "wide conditional branch instruction"));
11058
11059 gas_assert (cond != 0xF);
11060 inst.instruction |= cond << 22;
11061 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11062 }
11063 }
11064 else
11065 {
11066 inst.instruction = THUMB_OP16(opcode);
11067 if (cond == COND_ALWAYS)
11068 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11069 else
11070 {
11071 inst.instruction |= cond << 8;
11072 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11073 }
11074 /* Allow section relaxation. */
11075 if (unified_syntax && inst.size_req != 2)
11076 inst.relax = opcode;
11077 }
11078 inst.reloc.type = reloc;
11079 inst.reloc.pc_rel = 1;
11080 }
11081
11082 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11083 between the two is the maximum immediate allowed - which is passed in
11084 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  /* BKPT/HLT execute regardless of condition, so an explicit condition
     suffix is rejected.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      /* RANGE is the largest immediate the mnemonic accepts.  */
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
11099
static void
do_t_hlt (void)
{
  /* HLT takes an immediate in the range 0-63.  */
  do_t_bkpt_hlt1 (63);
}
11105
static void
do_t_bkpt (void)
{
  /* BKPT takes an immediate in the range 0-255.  */
  do_t_bkpt_hlt1 (255);
}
11111
/* Thumb branch-and-link encoded with the 23-bit PC-relative reloc.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11139
/* Thumb BX: must be the last instruction of any enclosing IT block.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11149
11150 static void
11151 do_t_bxj (void)
11152 {
11153 int Rm;
11154
11155 set_it_insn_type_last ();
11156 Rm = inst.operands[0].reg;
11157 reject_bad_reg (Rm);
11158 inst.instruction |= Rm << 16;
11159 }
11160
11161 static void
11162 do_t_clz (void)
11163 {
11164 unsigned Rd;
11165 unsigned Rm;
11166
11167 Rd = inst.operands[0].reg;
11168 Rm = inst.operands[1].reg;
11169
11170 reject_bad_reg (Rd);
11171 reject_bad_reg (Rm);
11172
11173 inst.instruction |= Rd << 8;
11174 inst.instruction |= Rm << 16;
11175 inst.instruction |= Rm;
11176 }
11177
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11184
static void
do_t_cpsi (void)
{
  /* CPSIE/CPSID are not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Wide encoding: carry the IE/ID distinction over from the
	 16-bit opcode's bits 4-5 into the imod field.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      /* An optional mode number sets the M bit (0x100) too.  */
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11212
11213 /* THUMB CPY instruction (argument parse). */
11214
11215 static void
11216 do_t_cpy (void)
11217 {
11218 if (inst.size_req == 4)
11219 {
11220 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11221 inst.instruction |= inst.operands[0].reg << 8;
11222 inst.instruction |= inst.operands[1].reg;
11223 }
11224 else
11225 {
11226 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11227 inst.instruction |= (inst.operands[0].reg & 0x7);
11228 inst.instruction |= inst.operands[1].reg << 3;
11229 }
11230 }
11231
11232 static void
11233 do_t_cbz (void)
11234 {
11235 set_it_insn_type (OUTSIDE_IT_INSN);
11236 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11237 inst.instruction |= inst.operands[0].reg;
11238 inst.reloc.pc_rel = 1;
11239 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11240 }
11241
static void
do_t_dbg (void)
{
  /* The DBG option value goes in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
11247
11248 static void
11249 do_t_div (void)
11250 {
11251 unsigned Rd, Rn, Rm;
11252
11253 Rd = inst.operands[0].reg;
11254 Rn = (inst.operands[1].present
11255 ? inst.operands[1].reg : Rd);
11256 Rm = inst.operands[2].reg;
11257
11258 reject_bad_reg (Rd);
11259 reject_bad_reg (Rn);
11260 reject_bad_reg (Rm);
11261
11262 inst.instruction |= Rd << 8;
11263 inst.instruction |= Rn << 16;
11264 inst.instruction |= Rm;
11265 }
11266
11267 static void
11268 do_t_hint (void)
11269 {
11270 if (unified_syntax && inst.size_req == 4)
11271 inst.instruction = THUMB_OP32 (inst.instruction);
11272 else
11273 inst.instruction = THUMB_OP16 (inst.instruction);
11274 }
11275
/* Thumb IT instruction (argument parse).  Records the base condition and
   then/else mask in now_it so that following instructions can be checked
   against the IT block, and fixes up the encoded mask when the base
   condition has its low bit clear.  */

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Store the mask with an extra marker bit so the block-tracking code
     can tell how many slots remain.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask gives the block
	 length; the bits above it are flipped to re-express the
	 then/else pattern relative to the adjusted condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* The base condition occupies bits 4-7 of the IT encoding.  */
  inst.instruction |= cond << 4;
}
11318
/* Helper function used for both push/pop and ldm/stm.  Encodes a Thumb-2
   load/store-multiple with base register BASE, register-list bitmask MASK
   and writeback flag WRITEBACK, diagnosing the register-list combinations
   the architecture forbids.  A single-register list is rewritten as the
   equivalent LDR/STR with a 4-byte offset.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the (32-bit) opcode distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* A load into the PC is a branch, so it must be the last
	       instruction in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Place the single register number in the Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11382
/* Thumb LDM/STM (and PUSH/POP, argument parse).  Prefers a 16-bit
   encoding where the register list and base register allow one,
   falling back to the 32-bit Thumb-2 form via encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit STMIA always writes back; the 16-bit LDMIA
		 writes back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based forms map onto PUSH/POP (with writeback) or a
		 single SP-relative STR/LDR (one register, no writeback).  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only the classic 16-bit LDMIA/STMIA forms.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11510
11511 static void
11512 do_t_ldrex (void)
11513 {
11514 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11515 || inst.operands[1].postind || inst.operands[1].writeback
11516 || inst.operands[1].immisreg || inst.operands[1].shifted
11517 || inst.operands[1].negative,
11518 BAD_ADDR_MODE);
11519
11520 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11521
11522 inst.instruction |= inst.operands[0].reg << 12;
11523 inst.instruction |= inst.operands[1].reg << 16;
11524 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11525 }
11526
11527 static void
11528 do_t_ldrexd (void)
11529 {
11530 if (!inst.operands[1].present)
11531 {
11532 constraint (inst.operands[0].reg == REG_LR,
11533 _("r14 not allowed as first register "
11534 "when second register is omitted"));
11535 inst.operands[1].reg = inst.operands[0].reg + 1;
11536 }
11537 constraint (inst.operands[0].reg == inst.operands[1].reg,
11538 BAD_OVERLAP);
11539
11540 inst.instruction |= inst.operands[0].reg << 12;
11541 inst.instruction |= inst.operands[1].reg << 8;
11542 inst.instruction |= inst.operands[2].reg << 16;
11543 }
11544
/* Thumb load/store of a single data item (LDR/STR and the byte,
   halfword and signed variants, argument parse).  Selects among the
   various 16-bit encodings (immediate offset, register offset,
   PC/SP-relative) and the 32-bit Thumb-2 encoding, possibly deferring
   the choice to relaxation.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes the PC is a branch, so it must be the last
     instruction in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate (literal) operand: try the literal pool.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated mnemonics.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation pick 16 vs 32 bit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified syntax: only the 16-bit encodings are available.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms: word-sized only, loads only for PC.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode to its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11731
11732 static void
11733 do_t_ldstd (void)
11734 {
11735 if (!inst.operands[1].present)
11736 {
11737 inst.operands[1].reg = inst.operands[0].reg + 1;
11738 constraint (inst.operands[0].reg == REG_LR,
11739 _("r14 not allowed here"));
11740 constraint (inst.operands[0].reg == REG_R12,
11741 _("r12 not allowed here"));
11742 }
11743
11744 if (inst.operands[2].writeback
11745 && (inst.operands[0].reg == inst.operands[2].reg
11746 || inst.operands[1].reg == inst.operands[2].reg))
11747 as_warn (_("base register written back, and overlaps "
11748 "one of transfer registers"));
11749
11750 inst.instruction |= inst.operands[0].reg << 12;
11751 inst.instruction |= inst.operands[1].reg << 8;
11752 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11753 }
11754
11755 static void
11756 do_t_ldstt (void)
11757 {
11758 inst.instruction |= inst.operands[0].reg << 12;
11759 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11760 }
11761
11762 static void
11763 do_t_mla (void)
11764 {
11765 unsigned Rd, Rn, Rm, Ra;
11766
11767 Rd = inst.operands[0].reg;
11768 Rn = inst.operands[1].reg;
11769 Rm = inst.operands[2].reg;
11770 Ra = inst.operands[3].reg;
11771
11772 reject_bad_reg (Rd);
11773 reject_bad_reg (Rn);
11774 reject_bad_reg (Rm);
11775 reject_bad_reg (Ra);
11776
11777 inst.instruction |= Rd << 8;
11778 inst.instruction |= Rn << 16;
11779 inst.instruction |= Rm;
11780 inst.instruction |= Ra << 12;
11781 }
11782
11783 static void
11784 do_t_mlal (void)
11785 {
11786 unsigned RdLo, RdHi, Rn, Rm;
11787
11788 RdLo = inst.operands[0].reg;
11789 RdHi = inst.operands[1].reg;
11790 Rn = inst.operands[2].reg;
11791 Rm = inst.operands[3].reg;
11792
11793 reject_bad_reg (RdLo);
11794 reject_bad_reg (RdHi);
11795 reject_bad_reg (Rn);
11796 reject_bad_reg (Rm);
11797
11798 inst.instruction |= RdLo << 12;
11799 inst.instruction |= RdHi << 8;
11800 inst.instruction |= Rn << 16;
11801 inst.instruction |= Rm;
11802 }
11803
/* Thumb MOV/MOVS/CMP (argument parse).  Handles immediate operands,
   register operands, register-shifted-register forms (encoded as
   separate shift instructions), and picks between the 16-bit and
   32-bit encodings depending on registers, IT-block state and any
   explicit size request.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A MOV that writes the PC ends the IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV/MOVS put the destination in bits 8-11; CMP uses bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    /* Let relaxation widen the instruction if needed.  */
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12100
12101 static void
12102 do_t_mov16 (void)
12103 {
12104 unsigned Rd;
12105 bfd_vma imm;
12106 bfd_boolean top;
12107
12108 top = (inst.instruction & 0x00800000) != 0;
12109 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12110 {
12111 constraint (top, _(":lower16: not allowed this instruction"));
12112 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12113 }
12114 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12115 {
12116 constraint (!top, _(":upper16: not allowed this instruction"));
12117 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12118 }
12119
12120 Rd = inst.operands[0].reg;
12121 reject_bad_reg (Rd);
12122
12123 inst.instruction |= Rd << 8;
12124 if (inst.reloc.type == BFD_RELOC_UNUSED)
12125 {
12126 imm = inst.reloc.exp.X_add_number;
12127 inst.instruction |= (imm & 0xf000) << 4;
12128 inst.instruction |= (imm & 0x0800) << 15;
12129 inst.instruction |= (imm & 0x0700) << 4;
12130 inst.instruction |= (imm & 0x00ff);
12131 }
12132 }
12133
/* Thumb MVN/MVNS/TST/CMN (argument parse).  Chooses between the 16-bit
   and 32-bit encodings according to registers, shifts, IT-block state
   and any explicit size request.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN allow SP as the first operand (deprecated elsewhere);
     the other mnemonics reject SP and PC outright.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place the destination at bit 8; the compare-style
	 mnemonics place their first operand at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: 16-bit, low registers, no shifts.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12213
/* Thumb-2 MRS (argument parse).  Handles both banked-register operands
   and the APSR/CPSR/SPSR special-register forms, applying the
   M-profile restrictions where appropriate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VMRS takes precedence when the operands match the VFP form.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register operand: the parsed value packs the SYSm field
	 and marker bits; split it back into the encoding fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12261
/* Thumb-2 MSR (argument parse).  Validates the requested PSR field mask
   against the selected processor profile and packs the field bits into
   the encoding.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VMSR takes precedence when the operands match the VFP form.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* A banked-register first operand carries the full encoded value;
     otherwise use the parsed PSR-field mask.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12308
/* Encode Thumb MUL/MULS, choosing between the 16-bit and 32-bit forms.
   The 16-bit encoding requires low registers and that the destination
   overlap one of the source registers.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm is MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	/* The 16-bit encoding always sets flags, so MULS is only
	   narrow outside an IT block.  */
	narrow = !in_it_block ();
      else
	/* Conversely, the flag-preserving MUL is only narrow inside
	   an IT block.  */
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12371
12372 static void
12373 do_t_mull (void)
12374 {
12375 unsigned RdLo, RdHi, Rn, Rm;
12376
12377 RdLo = inst.operands[0].reg;
12378 RdHi = inst.operands[1].reg;
12379 Rn = inst.operands[2].reg;
12380 Rm = inst.operands[3].reg;
12381
12382 reject_bad_reg (RdLo);
12383 reject_bad_reg (RdHi);
12384 reject_bad_reg (Rn);
12385 reject_bad_reg (Rm);
12386
12387 inst.instruction |= RdLo << 12;
12388 inst.instruction |= RdHi << 8;
12389 inst.instruction |= Rn << 16;
12390 inst.instruction |= Rm;
12391
12392 if (RdLo == RdHi)
12393 as_tsktsk (_("rdhi and rdlo must be different"));
12394 }
12395
/* Encode NOP and the hint instructions that share its encoder.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hint numbers above 15 only exist in the 32-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Pre-Thumb-2 fallback encoding (MOV r8, r8).  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12428
12429 static void
12430 do_t_neg (void)
12431 {
12432 if (unified_syntax)
12433 {
12434 bfd_boolean narrow;
12435
12436 if (THUMB_SETS_FLAGS (inst.instruction))
12437 narrow = !in_it_block ();
12438 else
12439 narrow = in_it_block ();
12440 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12441 narrow = FALSE;
12442 if (inst.size_req == 4)
12443 narrow = FALSE;
12444
12445 if (!narrow)
12446 {
12447 inst.instruction = THUMB_OP32 (inst.instruction);
12448 inst.instruction |= inst.operands[0].reg << 8;
12449 inst.instruction |= inst.operands[1].reg << 16;
12450 }
12451 else
12452 {
12453 inst.instruction = THUMB_OP16 (inst.instruction);
12454 inst.instruction |= inst.operands[0].reg;
12455 inst.instruction |= inst.operands[1].reg << 3;
12456 }
12457 }
12458 else
12459 {
12460 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12461 BAD_HIREG);
12462 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12463
12464 inst.instruction = THUMB_OP16 (inst.instruction);
12465 inst.instruction |= inst.operands[0].reg;
12466 inst.instruction |= inst.operands[1].reg << 3;
12467 }
12468 }
12469
12470 static void
12471 do_t_orn (void)
12472 {
12473 unsigned Rd, Rn;
12474
12475 Rd = inst.operands[0].reg;
12476 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12477
12478 reject_bad_reg (Rd);
12479 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12480 reject_bad_reg (Rn);
12481
12482 inst.instruction |= Rd << 8;
12483 inst.instruction |= Rn << 16;
12484
12485 if (!inst.operands[2].isreg)
12486 {
12487 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12488 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12489 }
12490 else
12491 {
12492 unsigned Rm;
12493
12494 Rm = inst.operands[2].reg;
12495 reject_bad_reg (Rm);
12496
12497 constraint (inst.operands[2].shifted
12498 && inst.operands[2].immisreg,
12499 _("shift must be constant"));
12500 encode_thumb32_shifted_operand (2);
12501 }
12502 }
12503
/* Encode PKHBT Rd, Rn, Rm {, LSL #imm}.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Optional shift amount, split across the imm3 (bits 14-12) and
	 imm2 (bits 7-6) fields.  */
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12529
/* Encode PKHTB.  With no shift this is the same operation as PKHBT
   with the source operands exchanged, so canonicalise and delegate.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb selector bit, turning this into PKHBT.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12546
/* Encode a Thumb-2 preload (PLD-family) address operand.  A register
   index must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12555
/* Encode PUSH/POP, choosing between the two 16-bit forms and the
   32-bit LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* 16-bit form: register list is all low registers...  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* ...or low registers plus exactly LR (push) / PC (pop).  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM on SP encoding.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12588
12589 static void
12590 do_t_rbit (void)
12591 {
12592 unsigned Rd, Rm;
12593
12594 Rd = inst.operands[0].reg;
12595 Rm = inst.operands[1].reg;
12596
12597 reject_bad_reg (Rd);
12598 reject_bad_reg (Rm);
12599
12600 inst.instruction |= Rd << 8;
12601 inst.instruction |= Rm << 16;
12602 inst.instruction |= Rm;
12603 }
12604
12605 static void
12606 do_t_rev (void)
12607 {
12608 unsigned Rd, Rm;
12609
12610 Rd = inst.operands[0].reg;
12611 Rm = inst.operands[1].reg;
12612
12613 reject_bad_reg (Rd);
12614 reject_bad_reg (Rm);
12615
12616 if (Rd <= 7 && Rm <= 7
12617 && inst.size_req != 4)
12618 {
12619 inst.instruction = THUMB_OP16 (inst.instruction);
12620 inst.instruction |= Rd;
12621 inst.instruction |= Rm << 3;
12622 }
12623 else if (unified_syntax)
12624 {
12625 inst.instruction = THUMB_OP32 (inst.instruction);
12626 inst.instruction |= Rd << 8;
12627 inst.instruction |= Rm << 16;
12628 inst.instruction |= Rm;
12629 }
12630 else
12631 inst.error = BAD_HIREG;
12632 }
12633
12634 static void
12635 do_t_rrx (void)
12636 {
12637 unsigned Rd, Rm;
12638
12639 Rd = inst.operands[0].reg;
12640 Rm = inst.operands[1].reg;
12641
12642 reject_bad_reg (Rd);
12643 reject_bad_reg (Rm);
12644
12645 inst.instruction |= Rd << 8;
12646 inst.instruction |= Rm;
12647 }
12648
/* Encode RSB{S}.  "rsbs Rd, Rs, #0" may be narrowed to the 16-bit NEG
   encoding when conditions allow.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; the 16-bit NEG always
	 sets flags, so the narrowing rule follows the IT state.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit immediate form; value resolved by the fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12703
/* Encode SETEND.  Deprecated from ARMv8 onwards.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Non-zero operand (BE) sets the E bit (bit 3).  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12715
/* Encode the shift instructions (ASR, LSL, LSR, ROR) in both their
   immediate and register forms, selecting between the 16-bit and
   32-bit encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: <shift> Rd, <op> is <shift> Rd, Rd, <op>.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit shifts set flags, so narrowing depends on IT state...  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      /* ...and on low registers...  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* ...there is no 16-bit ROR-by-immediate...  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* ...and the 16-bit register form requires Rd == Rn.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit shift-by-immediate is encoded as MOV{S} with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL (divided) syntax: only the 16-bit encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12863
12864 static void
12865 do_t_simd (void)
12866 {
12867 unsigned Rd, Rn, Rm;
12868
12869 Rd = inst.operands[0].reg;
12870 Rn = inst.operands[1].reg;
12871 Rm = inst.operands[2].reg;
12872
12873 reject_bad_reg (Rd);
12874 reject_bad_reg (Rn);
12875 reject_bad_reg (Rm);
12876
12877 inst.instruction |= Rd << 8;
12878 inst.instruction |= Rn << 16;
12879 inst.instruction |= Rm;
12880 }
12881
12882 static void
12883 do_t_simd2 (void)
12884 {
12885 unsigned Rd, Rn, Rm;
12886
12887 Rd = inst.operands[0].reg;
12888 Rm = inst.operands[1].reg;
12889 Rn = inst.operands[2].reg;
12890
12891 reject_bad_reg (Rd);
12892 reject_bad_reg (Rn);
12893 reject_bad_reg (Rm);
12894
12895 inst.instruction |= Rd << 8;
12896 inst.instruction |= Rn << 16;
12897 inst.instruction |= Rm;
12898 }
12899
/* Encode SMC #imm16, splitting the immediate across the T32 encoding
   fields.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12915
12916 static void
12917 do_t_hvc (void)
12918 {
12919 unsigned int value = inst.reloc.exp.X_add_number;
12920
12921 inst.reloc.type = BFD_RELOC_UNUSED;
12922 inst.instruction |= (value & 0x0fff);
12923 inst.instruction |= (value & 0xf000) << 4;
12924 }
12925
/* Common encoder for SSAT (BIAS == 1) and USAT (BIAS == 0).  BIAS is
   subtracted from the saturate position before encoding, since SSAT
   encodes sat_imm as position - 1.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* The shift amount is split across the imm3:imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12963
/* Encode SSAT: the saturate position is encoded minus one (bias 1).  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12969
12970 static void
12971 do_t_ssat16 (void)
12972 {
12973 unsigned Rd, Rn;
12974
12975 Rd = inst.operands[0].reg;
12976 Rn = inst.operands[2].reg;
12977
12978 reject_bad_reg (Rd);
12979 reject_bad_reg (Rn);
12980
12981 inst.instruction |= Rd << 8;
12982 inst.instruction |= inst.operands[1].imm - 1;
12983 inst.instruction |= Rn << 16;
12984 }
12985
12986 static void
12987 do_t_strex (void)
12988 {
12989 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12990 || inst.operands[2].postind || inst.operands[2].writeback
12991 || inst.operands[2].immisreg || inst.operands[2].shifted
12992 || inst.operands[2].negative,
12993 BAD_ADDR_MODE);
12994
12995 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12996
12997 inst.instruction |= inst.operands[0].reg << 8;
12998 inst.instruction |= inst.operands[1].reg << 12;
12999 inst.instruction |= inst.operands[2].reg << 16;
13000 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
13001 }
13002
13003 static void
13004 do_t_strexd (void)
13005 {
13006 if (!inst.operands[2].present)
13007 inst.operands[2].reg = inst.operands[1].reg + 1;
13008
13009 constraint (inst.operands[0].reg == inst.operands[1].reg
13010 || inst.operands[0].reg == inst.operands[2].reg
13011 || inst.operands[0].reg == inst.operands[3].reg,
13012 BAD_OVERLAP);
13013
13014 inst.instruction |= inst.operands[0].reg;
13015 inst.instruction |= inst.operands[1].reg << 12;
13016 inst.instruction |= inst.operands[2].reg << 8;
13017 inst.instruction |= inst.operands[3].reg << 16;
13018 }
13019
13020 static void
13021 do_t_sxtah (void)
13022 {
13023 unsigned Rd, Rn, Rm;
13024
13025 Rd = inst.operands[0].reg;
13026 Rn = inst.operands[1].reg;
13027 Rm = inst.operands[2].reg;
13028
13029 reject_bad_reg (Rd);
13030 reject_bad_reg (Rn);
13031 reject_bad_reg (Rm);
13032
13033 inst.instruction |= Rd << 8;
13034 inst.instruction |= Rn << 16;
13035 inst.instruction |= Rm;
13036 inst.instruction |= inst.operands[3].imm << 4;
13037 }
13038
/* Encode the SXTH/SXTB/UXTH/UXTB family, selecting between the 16-bit
   and 32-bit encodings.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* 16-bit form: low registers, zero rotation, no .w qualifier.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation field, bits 5-4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13074
/* Encode SVC (SWI), diagnosing architectures that lack it.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13091
/* Encode TBB/TBH [Rn, Rm {, LSL #1}].  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes TBH from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH accepts a shifted (LSL #1) index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13113
/* Encode UDF {#imm}.  The immediate defaults to zero; values above
   255 require the 32-bit encoding.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      /* 32-bit form: imm16 split across two fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13136
13137
/* Encode USAT: the saturate position is encoded directly (bias 0).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13143
13144 static void
13145 do_t_usat16 (void)
13146 {
13147 unsigned Rd, Rn;
13148
13149 Rd = inst.operands[0].reg;
13150 Rn = inst.operands[2].reg;
13151
13152 reject_bad_reg (Rd);
13153 reject_bad_reg (Rn);
13154
13155 inst.instruction |= Rd << 8;
13156 inst.instruction |= inst.operands[1].imm;
13157 inst.instruction |= Rn << 16;
13158 }
13159
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative base encodings an
   overloaded Neon mnemonic can take; N_INV marks a variant that does
   not exist.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or plain register) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13173
/* Map overloaded Neon opcodes to their respective encodings.  The
   columns correspond to the integer, float-or-polynomial and
   scalar-or-immediate members of struct neon_tab_entry; the X macro
   is redefined by each consumer of the table below.  */
#define NEON_ENC_TAB \
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13251
/* Generate the N_MNEM_* opcode enumeration from the encoding table.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13265
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding variant selected by TYPE
   (one of the suffixes of the NEON_ENC_*_ macros above) and mark the
   instruction as Neon so that suffix checking accepts it.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13290
/* Diagnose a Neon type suffix on a non-Neon instruction.  Must be a
   macro, as it can expand to a "return" in the calling encoder.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13301
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

   H - Half-precision (16-bit) register
   F - VFP S<n> register
   D - Neon D<n> register
   Q - Neon Q<n> register
   I - Immediate
   S - Scalar
   R - ARM register
   L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)

/* Build NS_* enumerator names (e.g. NS_DDD) from the shape table.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13389
/* Width classification of each shape, generated from the third column
   of NEON_SHAPE_DEF and indexed by enum neon_shape.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13407
/* The kinds of element a shape position can hold; one enumerator per
   mnemonic character of NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: immediates have no register width.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: list width depends on its length.  */
};
13432
/* Per-shape operand count and element kinds; neon_shape_tab is
   generated from NEON_SHAPE_DEF and indexed by enum neon_shape.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];  /* Kind of each operand.  */
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13454
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following bits are only meaningful in combination with N_EQK
     and reuse the values of the plain type bits above.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13499
/* All the N_EQK modifier bits together.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the single-bit type masks above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13515
13516 /* Select a "shape" for the current instruction (describing register types or
13517 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13518 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13519 function of operand parsing, so this function doesn't need to be called.
13520 Shapes should be listed in order of decreasing length. */
13521
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the first one whose every element
     kind is compatible with the corresponding parsed operand wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only diagnose when the caller actually supplied candidates.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13658
13659 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13660 means the Q bit should be set). */
13661
13662 static int
13663 neon_quad (enum neon_shape shape)
13664 {
13665 return neon_shape_class[shape] == SC_QUAD;
13666 }
13667
13668 static void
13669 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13670 unsigned *g_size)
13671 {
13672 /* Allow modification to be made to types which are constrained to be
13673 based on the key element, based on bits set alongside N_EQK. */
13674 if ((typebits & N_EQK) != 0)
13675 {
13676 if ((typebits & N_HLF) != 0)
13677 *g_size /= 2;
13678 else if ((typebits & N_DBL) != 0)
13679 *g_size *= 2;
13680 if ((typebits & N_SGN) != 0)
13681 *g_type = NT_signed;
13682 else if ((typebits & N_UNS) != 0)
13683 *g_type = NT_unsigned;
13684 else if ((typebits & N_INT) != 0)
13685 *g_type = NT_integer;
13686 else if ((typebits & N_FLT) != 0)
13687 *g_type = NT_float;
13688 else if ((typebits & N_SIZ) != 0)
13689 *g_type = NT_untyped;
13690 }
13691 }
13692
13693 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13694 operand type, i.e. the single type specified in a Neon instruction when it
13695 is the only one given. */
13696
13697 static struct neon_type_el
13698 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13699 {
13700 struct neon_type_el dest = *key;
13701
13702 gas_assert ((thisarg & N_EQK) != 0);
13703
13704 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13705
13706 return dest;
13707 }
13708
13709 /* Convert Neon type and size into compact bitmask representation. */
13710
13711 static enum neon_type_mask
13712 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13713 {
13714 switch (type)
13715 {
13716 case NT_untyped:
13717 switch (size)
13718 {
13719 case 8: return N_8;
13720 case 16: return N_16;
13721 case 32: return N_32;
13722 case 64: return N_64;
13723 default: ;
13724 }
13725 break;
13726
13727 case NT_integer:
13728 switch (size)
13729 {
13730 case 8: return N_I8;
13731 case 16: return N_I16;
13732 case 32: return N_I32;
13733 case 64: return N_I64;
13734 default: ;
13735 }
13736 break;
13737
13738 case NT_float:
13739 switch (size)
13740 {
13741 case 16: return N_F16;
13742 case 32: return N_F32;
13743 case 64: return N_F64;
13744 default: ;
13745 }
13746 break;
13747
13748 case NT_poly:
13749 switch (size)
13750 {
13751 case 8: return N_P8;
13752 case 16: return N_P16;
13753 case 64: return N_P64;
13754 default: ;
13755 }
13756 break;
13757
13758 case NT_signed:
13759 switch (size)
13760 {
13761 case 8: return N_S8;
13762 case 16: return N_S16;
13763 case 32: return N_S32;
13764 case 64: return N_S64;
13765 default: ;
13766 }
13767 break;
13768
13769 case NT_unsigned:
13770 switch (size)
13771 {
13772 case 8: return N_U8;
13773 case 16: return N_U16;
13774 case 32: return N_U32;
13775 case 64: return N_U64;
13776 default: ;
13777 }
13778 break;
13779
13780 default: ;
13781 }
13782
13783 return N_UTYP;
13784 }
13785
13786 /* Convert compact Neon bitmask type representation to a type and size. Only
13787 handles the case where a single bit is set in the mask. */
13788
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK shares bit encodings with the ordinary type bits, so a mask
     containing it cannot be decoded here.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size-group bit is set.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element type from whichever type-group bit is set.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13824
13825 /* Modify a bitmask of allowed types. This is only needed for type
13826 relaxation. */
13827
13828 static unsigned
13829 modify_types_allowed (unsigned allowed, unsigned mods)
13830 {
13831 unsigned size;
13832 enum neon_el_type type;
13833 unsigned destmask;
13834 int i;
13835
13836 destmask = 0;
13837
13838 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13839 {
13840 if (el_type_of_type_chk (&type, &size,
13841 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13842 {
13843 neon_modify_type_size (mods, &type, &size);
13844 destmask |= type_chk_of_el_type (type, size);
13845 }
13846 }
13847
13848 return destmask;
13849 }
13850
13851 /* Check type and return type classification.
13852 The manual states (paraphrase): If one datatype is given, it indicates the
13853 type given in:
13854 - the second operand, if there is one
13855 - the operand, if there is no second operand
13856 - the result, if there are no operands.
13857 This isn't quite good enough though, so we use a concept of a "key" datatype
13858 which is set on a per-instruction basis, which is the one which matters when
13859 only one data type is written.
13860 Note: this function has side-effects (e.g. filling in missing operands). All
13861 Neon instructions should call it before performing bit encoding. */
13862
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types after the mnemonic and types after individual operands are
     mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes over the operands: pass 0 records the key operand's type,
     size and allowed-type mask; pass 1 validates every operand against
     that key (and against register widths in VFP mode).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must agree with the (modified) key.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14060
14061 /* Neon-style VFP instruction forwarding. */
14062
14063 /* Thumb VFP instructions have 0xE in the condition field. */
14064
14065 static void
14066 do_vfp_cond_or_thumb (void)
14067 {
14068 inst.is_neon = 1;
14069
14070 if (thumb_mode)
14071 inst.instruction |= 0xe0000000;
14072 else
14073 inst.instruction |= inst.cond << 28;
14074 }
14075
14076 /* Look up and encode a simple mnemonic, for use as a helper function for the
14077 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14078 etc. It is assumed that operand parsing has already been done, and that the
14079 operands are in the form expected by the given opcode (this isn't necessarily
14080 the same as the form in which they were parsed, hence some massaging must
14081 take place before this function is called).
14082 Checks current arch version against that in the looked-up opcode. */
14083
14084 static void
14085 do_vfp_nsyn_opcode (const char *opname)
14086 {
14087 const struct asm_opcode *opcode;
14088
14089 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14090
14091 if (!opcode)
14092 abort ();
14093
14094 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14095 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14096 _(BAD_FPU));
14097
14098 inst.is_neon = 1;
14099
14100 if (thumb_mode)
14101 {
14102 inst.instruction = opcode->tvalue;
14103 opcode->tencode ();
14104 }
14105 else
14106 {
14107 inst.instruction = (inst.cond << 28) | opcode->avalue;
14108 opcode->aencode ();
14109 }
14110 }
14111
14112 static void
14113 do_vfp_nsyn_add_sub (enum neon_shape rs)
14114 {
14115 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14116
14117 if (rs == NS_FFF || rs == NS_HHH)
14118 {
14119 if (is_add)
14120 do_vfp_nsyn_opcode ("fadds");
14121 else
14122 do_vfp_nsyn_opcode ("fsubs");
14123
14124 /* ARMv8.2 fp16 instruction. */
14125 if (rs == NS_HHH)
14126 do_scalar_fp16_v82_encode ();
14127 }
14128 else
14129 {
14130 if (is_add)
14131 do_vfp_nsyn_opcode ("faddd");
14132 else
14133 do_vfp_nsyn_opcode ("fsubd");
14134 }
14135 }
14136
14137 /* Check operand types to see if this is a VFP instruction, and if so call
14138 PFN (). */
14139
14140 static int
14141 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14142 {
14143 enum neon_shape rs;
14144 struct neon_type_el et;
14145
14146 switch (args)
14147 {
14148 case 2:
14149 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14150 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14151 break;
14152
14153 case 3:
14154 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14155 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14156 N_F_ALL | N_KEY | N_VFP);
14157 break;
14158
14159 default:
14160 abort ();
14161 }
14162
14163 if (et.type != NT_invtype)
14164 {
14165 pfn (rs);
14166 return SUCCESS;
14167 }
14168
14169 inst.error = NULL;
14170 return FAIL;
14171 }
14172
14173 static void
14174 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14175 {
14176 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14177
14178 if (rs == NS_FFF || rs == NS_HHH)
14179 {
14180 if (is_mla)
14181 do_vfp_nsyn_opcode ("fmacs");
14182 else
14183 do_vfp_nsyn_opcode ("fnmacs");
14184
14185 /* ARMv8.2 fp16 instruction. */
14186 if (rs == NS_HHH)
14187 do_scalar_fp16_v82_encode ();
14188 }
14189 else
14190 {
14191 if (is_mla)
14192 do_vfp_nsyn_opcode ("fmacd");
14193 else
14194 do_vfp_nsyn_opcode ("fnmacd");
14195 }
14196 }
14197
14198 static void
14199 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14200 {
14201 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14202
14203 if (rs == NS_FFF || rs == NS_HHH)
14204 {
14205 if (is_fma)
14206 do_vfp_nsyn_opcode ("ffmas");
14207 else
14208 do_vfp_nsyn_opcode ("ffnmas");
14209
14210 /* ARMv8.2 fp16 instruction. */
14211 if (rs == NS_HHH)
14212 do_scalar_fp16_v82_encode ();
14213 }
14214 else
14215 {
14216 if (is_fma)
14217 do_vfp_nsyn_opcode ("ffmad");
14218 else
14219 do_vfp_nsyn_opcode ("ffnmad");
14220 }
14221 }
14222
14223 static void
14224 do_vfp_nsyn_mul (enum neon_shape rs)
14225 {
14226 if (rs == NS_FFF || rs == NS_HHH)
14227 {
14228 do_vfp_nsyn_opcode ("fmuls");
14229
14230 /* ARMv8.2 fp16 instruction. */
14231 if (rs == NS_HHH)
14232 do_scalar_fp16_v82_encode ();
14233 }
14234 else
14235 do_vfp_nsyn_opcode ("fmuld");
14236 }
14237
14238 static void
14239 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14240 {
14241 int is_neg = (inst.instruction & 0x80) != 0;
14242 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14243
14244 if (rs == NS_FF || rs == NS_HH)
14245 {
14246 if (is_neg)
14247 do_vfp_nsyn_opcode ("fnegs");
14248 else
14249 do_vfp_nsyn_opcode ("fabss");
14250
14251 /* ARMv8.2 fp16 instruction. */
14252 if (rs == NS_HH)
14253 do_scalar_fp16_v82_encode ();
14254 }
14255 else
14256 {
14257 if (is_neg)
14258 do_vfp_nsyn_opcode ("fnegd");
14259 else
14260 do_vfp_nsyn_opcode ("fabsd");
14261 }
14262 }
14263
14264 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14265 insns belong to Neon, and are handled elsewhere. */
14266
14267 static void
14268 do_vfp_nsyn_ldm_stm (int is_dbmode)
14269 {
14270 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14271 if (is_ldm)
14272 {
14273 if (is_dbmode)
14274 do_vfp_nsyn_opcode ("fldmdbs");
14275 else
14276 do_vfp_nsyn_opcode ("fldmias");
14277 }
14278 else
14279 {
14280 if (is_dbmode)
14281 do_vfp_nsyn_opcode ("fstmdbs");
14282 else
14283 do_vfp_nsyn_opcode ("fstmias");
14284 }
14285 }
14286
14287 static void
14288 do_vfp_nsyn_sqrt (void)
14289 {
14290 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14291 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14292
14293 if (rs == NS_FF || rs == NS_HH)
14294 {
14295 do_vfp_nsyn_opcode ("fsqrts");
14296
14297 /* ARMv8.2 fp16 instruction. */
14298 if (rs == NS_HH)
14299 do_scalar_fp16_v82_encode ();
14300 }
14301 else
14302 do_vfp_nsyn_opcode ("fsqrtd");
14303 }
14304
14305 static void
14306 do_vfp_nsyn_div (void)
14307 {
14308 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14309 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14310 N_F_ALL | N_KEY | N_VFP);
14311
14312 if (rs == NS_FFF || rs == NS_HHH)
14313 {
14314 do_vfp_nsyn_opcode ("fdivs");
14315
14316 /* ARMv8.2 fp16 instruction. */
14317 if (rs == NS_HHH)
14318 do_scalar_fp16_v82_encode ();
14319 }
14320 else
14321 do_vfp_nsyn_opcode ("fdivd");
14322 }
14323
14324 static void
14325 do_vfp_nsyn_nmul (void)
14326 {
14327 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14328 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14329 N_F_ALL | N_KEY | N_VFP);
14330
14331 if (rs == NS_FFF || rs == NS_HHH)
14332 {
14333 NEON_ENCODE (SINGLE, inst);
14334 do_vfp_sp_dyadic ();
14335
14336 /* ARMv8.2 fp16 instruction. */
14337 if (rs == NS_HHH)
14338 do_scalar_fp16_v82_encode ();
14339 }
14340 else
14341 {
14342 NEON_ENCODE (DOUBLE, inst);
14343 do_vfp_dp_rd_rn_rm ();
14344 }
14345 do_vfp_cond_or_thumb ();
14346
14347 }
14348
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: VCMP{E} Sd,Sm / Dd,Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against #0: rewrite the opcode to the VCMP{E}Z form.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14403
/* Shift the operands up by one and synthesize a writeback SP operand in
   slot 0, turning a PUSH/POP-style register list into the operand layout
   expected by the FLDM/FSTM encoders.  */
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
14414
14415 static void
14416 do_vfp_nsyn_push (void)
14417 {
14418 nsyn_insert_sp ();
14419
14420 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14421 _("register list must contain at least 1 and at most 16 "
14422 "registers"));
14423
14424 if (inst.operands[1].issingle)
14425 do_vfp_nsyn_opcode ("fstmdbs");
14426 else
14427 do_vfp_nsyn_opcode ("fstmdbd");
14428 }
14429
14430 static void
14431 do_vfp_nsyn_pop (void)
14432 {
14433 nsyn_insert_sp ();
14434
14435 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14436 _("register list must contain at least 1 and at most 16 "
14437 "registers"));
14438
14439 if (inst.operands[1].issingle)
14440 do_vfp_nsyn_opcode ("fldmias");
14441 else
14442 do_vfp_nsyn_opcode ("fldmiad");
14443 }
14444
14445 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14446 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14447
14448 static void
14449 neon_dp_fixup (struct arm_it* insn)
14450 {
14451 unsigned int i = insn->instruction;
14452 insn->is_neon = 1;
14453
14454 if (thumb_mode)
14455 {
14456 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14457 if (i & (1 << 24))
14458 i |= 1 << 28;
14459
14460 i &= ~(1 << 24);
14461
14462 i |= 0xef000000;
14463 }
14464 else
14465 i |= 0xf2000000;
14466
14467 insn->instruction = i;
14468 }
14469
14470 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14471 (0, 1, 2, 3). */
14472
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based position of the lowest set bit, so a power of
     two 1 << n yields n + 1; subtracting 4 maps 8/16/32/64 to 0..3.  */
  int lowbit = ffs (x);

  return (unsigned) (lowbit - 4);
}
14478
/* Neon register numbers are split across two encoding fields: the low
   four bits and a separate high (fifth) bit.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14481
14482 /* Encode insns with bit pattern:
14483
14484 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14485 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14486
14487 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14488 different meaning for some instruction. */
14489
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Operand 0 = Rd (D in bit 22), operand 1 = Rn (N in bit 7),
     operand 2 = Rm (M in bit 5).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* SIZE == -1 leaves the size field alone for insns that reuse it.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14506
14507 /* Encode instructions of the form:
14508
14509 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14510 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14511
14512 Don't write size if SIZE == -1. */
14513
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Operand 0 = Rd (D in bit 22), operand 1 = Rm (M in bit 5).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* SIZE == -1 leaves the size field alone for insns that reuse it.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14529
14530 /* Neon instruction encoders, in approximate order of appearance. */
14531
14532 static void
14533 do_neon_dyadic_i_su (void)
14534 {
14535 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14536 struct neon_type_el et = neon_check_type (3, rs,
14537 N_EQK, N_EQK, N_SU_32 | N_KEY);
14538 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14539 }
14540
14541 static void
14542 do_neon_dyadic_i64_su (void)
14543 {
14544 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14545 struct neon_type_el et = neon_check_type (3, rs,
14546 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14547 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14548 }
14549
/* Encode an immediate-shift form: ET gives the element type, IMMBITS the
   already-biased immediate field.  The U bit is only written when
   WRITE_UBIT is set.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes; its bit 3 becomes the L bit (bit 7), the low
     three bits land in bits 19-21 above the immediate.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14568
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: VSHL Dd/Qd, Dm/Qm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14604
/* Encode the saturating-shift VQSHL.  Structure mirrors do_neon_shl_imm,
   but the immediate form also writes the U bit from the element
   signedness.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: shift in [0, size - 1].  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register form: operand order is reversed relative to the usual
	 three-same encoding.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14634
14635 static void
14636 do_neon_rshl (void)
14637 {
14638 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14639 struct neon_type_el et = neon_check_type (3, rs,
14640 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14641 unsigned int tmp;
14642
14643 tmp = inst.operands[2].reg;
14644 inst.operands[2].reg = inst.operands[1].reg;
14645 inst.operands[1].reg = tmp;
14646 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14647 }
14648
14649 static int
14650 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14651 {
14652 /* Handle .I8 pseudo-instructions. */
14653 if (size == 8)
14654 {
14655 /* Unfortunately, this will make everything apart from zero out-of-range.
14656 FIXME is this the intended semantics? There doesn't seem much point in
14657 accepting .I8 if so. */
14658 immediate |= immediate << 8;
14659 size = 16;
14660 }
14661
14662 if (size >= 32)
14663 {
14664 if (immediate == (immediate & 0x000000ff))
14665 {
14666 *immbits = immediate;
14667 return 0x1;
14668 }
14669 else if (immediate == (immediate & 0x0000ff00))
14670 {
14671 *immbits = immediate >> 8;
14672 return 0x3;
14673 }
14674 else if (immediate == (immediate & 0x00ff0000))
14675 {
14676 *immbits = immediate >> 16;
14677 return 0x5;
14678 }
14679 else if (immediate == (immediate & 0xff000000))
14680 {
14681 *immbits = immediate >> 24;
14682 return 0x7;
14683 }
14684 if ((immediate & 0xffff) != (immediate >> 16))
14685 goto bad_immediate;
14686 immediate &= 0xffff;
14687 }
14688
14689 if (immediate == (immediate & 0x000000ff))
14690 {
14691 *immbits = immediate;
14692 return 0x9;
14693 }
14694 else if (immediate == (immediate & 0x0000ff00))
14695 {
14696 *immbits = immediate >> 8;
14697 return 0xb;
14698 }
14699
14700 bad_immediate:
14701 first_error (_("immediate value out of range"));
14702 return FAIL;
14703 }
14704
/* Encode the Neon bitwise-logic instructions.  The three-register form
   needs no type information; the immediate forms must convert the
   constant into the cmode/immbits encoding, with VAND and VORN handled
   as pseudo-instructions for VBIC and VORR by inverting the
   immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form; element types are irrelevant.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, written either "Vd, Vd, #imm" or "Vd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Which logic mnemonic this is; distinguishes the cases below.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14790
14791 static void
14792 do_neon_bitfield (void)
14793 {
14794 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14795 neon_check_type (3, rs, N_IGNORE_TYPE);
14796 neon_three_same (neon_quad (rs), 0, -1);
14797 }
14798
14799 static void
14800 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14801 unsigned destbits)
14802 {
14803 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14804 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14805 types | N_KEY);
14806 if (et.type == NT_float)
14807 {
14808 NEON_ENCODE (FLOAT, inst);
14809 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14810 }
14811 else
14812 {
14813 NEON_ENCODE (INTEGER, inst);
14814 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14815 }
14816 }
14817
/* Dyadic operation on signed/unsigned integer or float 32-bit element
   types; U bit tracks unsignedness.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14823
/* As do_neon_dyadic_if_su, for instructions restricted to D registers.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14831
/* Dyadic operation on integer or float 32-bit element types, with the
   U bit left clear regardless of signedness.  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14839
/* Bitmask values selecting which checks vfp_or_neon_is_neon (below)
   performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Reject conditional Neon in ARM state.  */
  NEON_CHECK_ARCH = 2,	 /* Require baseline Neon support.  */
  NEON_CHECK_ARCH8 = 4	 /* Require ARMv8 Neon support.  */
};
14846
14847 /* Call this function if an instruction which may have belonged to the VFP or
14848 Neon instruction sets, but turned out to be a Neon instruction (due to the
14849 operand types involved, etc.). We have to check and/or fix-up a couple of
14850 things:
14851
14852 - Make sure the user hasn't attempted to make a Neon instruction
14853 conditional.
14854 - Alter the value in the condition code field if necessary.
14855 - Make sure that the arch supports Neon instructions.
14856
14857 Which of these operations take place depends on bits from enum
14858 vfp_or_neon_is_neon_bits.
14859
14860 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14861 current instruction's condition is COND_ALWAYS, the condition field is
14862 changed to inst.uncond_value. This is necessary because instructions shared
14863 between VFP and Neon may be conditional for the VFP variants only, and the
14864 unconditional Neon version must have, e.g., 0xF in the condition field. */
14865
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Side effect documented in the comment above: force the
	 unconditional encoding into the condition field.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Architecture check: baseline Neon.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  /* Architecture check: ARMv8 Neon.  */
  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14897
/* Encode VADD/VSUB: try the VFP form first, then fall back to the Neon
   integer/float encoding.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14911
14912 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14913 result to be:
14914 V<op> A,B (A is operand 0, B is operand 2)
14915 to mean:
14916 V<op> A,B,A
14917 not:
14918 V<op> A,B,B
14919 so handle that case specially. */
14920
14921 static void
14922 neon_exchange_operands (void)
14923 {
14924 if (inst.operands[1].present)
14925 {
14926 void *scratch = xmalloc (sizeof (inst.operands[0]));
14927
14928 /* Swap operands[1] and operands[2]. */
14929 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14930 inst.operands[1] = inst.operands[2];
14931 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14932 free (scratch);
14933 }
14934 else
14935 {
14936 inst.operands[1] = inst.operands[2];
14937 inst.operands[2] = inst.operands[0];
14938 }
14939 }
14940
/* Encode a Neon compare.  REGTYPES are the element types accepted by
   the register-register form, IMMTYPES those accepted by the
   compare-against-zero immediate form.  When INVERT is set, the
   register operands are exchanged first (for the pseudo-instructions
   expressed via the opposite comparison).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14968
/* Compare with operands encoded in the order written.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
14974
/* Compare encoded as the inverse comparison with operands swapped.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
14980
/* Equality compare: integer or float element types, no signedness
   distinction.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14986
14987 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14988 scalars, which are encoded in 5 bits, M : Rm.
14989 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14990 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14991 index in M. */
14992
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  /* Encode a multiply scalar as the 5-bit M:Rm field described in the
     comment above; reports an error and returns 0 when the register or
     element index is out of range for the element size.  */
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15018
15019 /* Encode multiply / multiply-accumulate scalar instructions. */
15020
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Operand 2 is the scalar; encode it into the M:Rm field.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15043
/* Encode VMLA/VMLS: VFP form first, then Neon scalar or three-register
   forms depending on the third operand.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* Multiply-accumulate by scalar.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15068
/* Encode VFMA/VFMS: VFP form first, then the Neon three-same form.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* NT_untyped keeps the "U" bit clear regardless of signedness.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15080
15081 static void
15082 do_neon_tst (void)
15083 {
15084 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15085 struct neon_type_el et = neon_check_type (3, rs,
15086 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15087 neon_three_same (neon_quad (rs), 0, et.size);
15088 }
15089
15090 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15091 same types as the MAC equivalents. The polynomial type for this instruction
15092 is encoded the same as the integer type. */
15093
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* Scalar form shares the MAC encoder (see comment above).  */
    do_neon_mac_maybe_scalar ();
  else
    /* Three-register form additionally accepts the P8 polynomial type.  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15108
/* Encode VQDMULH/VQRDMULH in either the scalar or three-register
   form; only signed 16/32-bit element types are valid.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15130
15131 static void
15132 do_neon_qrdmlah (void)
15133 {
15134 /* Check we're on the correct architecture. */
15135 if (!mark_feature_used (&fpu_neon_ext_armv8))
15136 inst.error =
15137 _("instruction form not available on this architecture.");
15138 else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15139 {
15140 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15141 record_feature_use (&fpu_neon_ext_v8_1);
15142 }
15143
15144 if (inst.operands[2].isscalar)
15145 {
15146 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15147 struct neon_type_el et = neon_check_type (3, rs,
15148 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15149 NEON_ENCODE (SCALAR, inst);
15150 neon_mul_mac (et, neon_quad (rs));
15151 }
15152 else
15153 {
15154 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15155 struct neon_type_el et = neon_check_type (3, rs,
15156 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15157 NEON_ENCODE (INTEGER, inst);
15158 /* The U bit (rounding) comes from bit mask. */
15159 neon_three_same (neon_quad (rs), 0, et.size);
15160 }
15161 }
15162
15163 static void
15164 do_neon_fcmp_absolute (void)
15165 {
15166 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15167 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15168 N_F_16_32 | N_KEY);
15169 /* Size field comes from bit mask. */
15170 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15171 }
15172
/* Inverted-condition absolute compare: swap the source operands, then
   encode as the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15179
15180 static void
15181 do_neon_step (void)
15182 {
15183 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15184 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15185 N_F_16_32 | N_KEY);
15186 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15187 }
15188
/* Encode VABS/VNEG: try the VFP form first, then the Neon two-register
   form.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the float variant; size goes in bits 19:18.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15214
15215 static void
15216 do_neon_sli (void)
15217 {
15218 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15219 struct neon_type_el et = neon_check_type (2, rs,
15220 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15221 int imm = inst.operands[2].imm;
15222 constraint (imm < 0 || (unsigned)imm >= et.size,
15223 _("immediate out of range for insert"));
15224 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15225 }
15226
15227 static void
15228 do_neon_sri (void)
15229 {
15230 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15231 struct neon_type_el et = neon_check_type (2, rs,
15232 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15233 int imm = inst.operands[2].imm;
15234 constraint (imm < 1 || (unsigned)imm > et.size,
15235 _("immediate out of range for insert"));
15236 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15237 }
15238
/* Encode VQSHLU: saturating shift left with unsigned result from
   signed operands.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15255
15256 static void
15257 do_neon_qmovn (void)
15258 {
15259 struct neon_type_el et = neon_check_type (2, NS_DQ,
15260 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15261 /* Saturating move where operands can be signed or unsigned, and the
15262 destination has the same signedness. */
15263 NEON_ENCODE (INTEGER, inst);
15264 if (et.type == NT_unsigned)
15265 inst.instruction |= 0xc0;
15266 else
15267 inst.instruction |= 0x80;
15268 neon_two_same (0, 1, et.size / 2);
15269 }
15270
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  /* Destination size is half the (wider) source element size.  */
  neon_two_same (0, 1, et.size / 2);
}
15280
/* Encode VQSHRN/VQRSHRN (shift right, saturate, narrow).  A zero shift
   count degenerates to VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Shift is encoded as (narrow) size - amount.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15307
/* Encode VQSHRUN/VQRSHRUN (shift right, saturate, narrow, unsigned
   result from signed operands).  A zero shift count degenerates to
   VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15337
15338 static void
15339 do_neon_movn (void)
15340 {
15341 struct neon_type_el et = neon_check_type (2, NS_DQ,
15342 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15343 NEON_ENCODE (INTEGER, inst);
15344 neon_two_same (0, 1, et.size / 2);
15345 }
15346
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift count
   degenerates to VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Shift is encoded as (narrow) size - amount.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15371
/* Encode VSHLL (shift left long).  A shift equal to the element size
   has its own encoding; smaller shifts use the general immediate-shift
   form.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15401
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table describing every VCVT conversion: each CVT_VAR entry
   gives a suffix naming the (destination, source) type pair, the two
   type masks, a register-class/key modifier, and the VFP opcode names
   for the bitshift, plain and round-towards-zero forms (NULL where no
   such form exists).  The table is expanded several times below with
   different definitions of CVT_VAR.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15440
/* Expand the table above into one enumerator per conversion
   (neon_cvt_flavour_s32_f32, ...), in table order.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour that is a VFP (not Neon) conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15453
/* Determine which conversion the current instruction is, by running the
   type-checker over every entry of the flavour table until one accepts
   the operand types.  Returns neon_cvt_flavour_invalid if none do.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15479
/* Rounding behaviour selected by the mnemonic suffix of the VCVT/VRINT
   family (VCVTA, VCVTN, ...).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* 'A' suffix.  */
  neon_cvt_mode_n,	/* 'N' suffix.  */
  neon_cvt_mode_p,	/* 'P' suffix.  */
  neon_cvt_mode_m,	/* 'M' suffix.  */
  neon_cvt_mode_z,	/* 'Z' suffix (towards zero).  */
  neon_cvt_mode_x,	/* 'X' suffix.  */
  neon_cvt_mode_r	/* 'R' suffix.  */
};
15490
15491 /* Neon-syntax VFP conversions. */
15492
/* Encode a conversion written in Neon syntax but implemented as a VFP
   instruction: look up the VFP opcode name for FLAVOUR (bitshift or
   plain table, depending on the shape RS) and emit it.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point form converts in place, so operands 0 and 1
	     must name the same register; the shift count moves down to
	     operand slot 1 for the opcode emitter.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15544
/* Encode the round-towards-zero VCVT variants, using the ZN column of
   the flavour table; flavours with no such form (NULL entry) emit
   nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15561
/* Encode an FP v8 (ARMv8) VCVT{A,N,P,M} float-to-integer conversion.
   FLAVOUR selects the operand sizes/signedness, MODE the rounding mode.
   These instructions are unconditional, so they must not appear inside
   an IT block.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz = double-precision source, op = signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Rounding-mode field value.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* 0xf in the condition field marks the instruction unconditional.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15637
/* Worker for all the VCVT-family pseudo-ops.  Chooses between the VFP
   and Neon encodings of VCVT (including the ARMv8 rounding-mode forms
   and ARMv8.2 fp16 forms) based on the operand shape RS and the type
   flavour, then emits the chosen encoding.  MODE is the conversion /
   rounding mode requested by the mnemonic.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion (with #fbits immediate).  */
      {
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements: encode 32 - #fbits in bits <21:16>.
	       (Bit 21 is already set unconditionally above.)  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision elements: encode 16 - #fbits and clear
	       bit 9.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 VCVT{A,N,P,M}: unconditional, mode in bits <9:8>.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  /* Neon integer conversion.  */
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15829
/* VCVTR: conversion using the rounding mode from the FPSCR, mapped
   onto conversion mode "x".  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15835
/* Plain VCVT: uses conversion mode "z" (round towards zero for the
   float-to-integer forms -- see PR11109 handling in do_neon_cvt_1).  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15841
/* VCVTA: ARMv8 rounding-mode conversion, mode "a".  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15847
/* VCVTN: ARMv8 rounding-mode conversion, mode "n".  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15853
/* VCVTP: ARMv8 rounding-mode conversion, mode "p".  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15859
/* VCVTM: ARMv8 rounding-mode conversion, mode "m".  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15865
15866 static void
15867 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15868 {
15869 if (is_double)
15870 mark_feature_used (&fpu_vfp_ext_armv8);
15871
15872 encode_arm_vfp_reg (inst.operands[0].reg,
15873 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15874 encode_arm_vfp_reg (inst.operands[1].reg,
15875 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15876 inst.instruction |= to ? 0x10000 : 0;
15877 inst.instruction |= t ? 0x80 : 0;
15878 inst.instruction |= is_double ? 0x100 : 0;
15879 do_vfp_cond_or_thumb ();
15880 }
15881
/* Parse-and-dispatch worker for VCVTB/VCVTT.  Tries each legal type
   combination in turn; neon_check_type sets inst.error on a mismatch,
   so the error must be cleared before encoding a successful match.
   T distinguishes VCVTT (TRUE) from VCVTB (FALSE).  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15923
/* VCVTB: convert using the bottom half of the half-precision register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15929
15930
/* VCVTT: convert using the top half of the half-precision register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15936
/* Encode the immediate form of VMOV/VMVN.  Determines the cmode field
   from the immediate value; if the value cannot be encoded directly,
   retries with the bitwise-inverted immediate and the opposite
   instruction (VMOV <-> VMVN).  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    /* A 64-bit immediate: the high word was parsed into the reg field.  */
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit, which the retry above may have flipped.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15988
15989 static void
15990 do_neon_mvn (void)
15991 {
15992 if (inst.operands[1].isreg)
15993 {
15994 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15995
15996 NEON_ENCODE (INTEGER, inst);
15997 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15998 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15999 inst.instruction |= LOW4 (inst.operands[1].reg);
16000 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16001 inst.instruction |= neon_quad (rs) << 6;
16002 }
16003 else
16004 {
16005 NEON_ENCODE (IMMED, inst);
16006 neon_move_immediate ();
16007 }
16008
16009 neon_dp_fixup (&inst);
16010 }
16011
16012 /* Encode instructions of form:
16013
16014 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16015 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16016
16017 static void
16018 neon_mixed_length (struct neon_type_el et, unsigned size)
16019 {
16020 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16021 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16022 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16023 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16024 inst.instruction |= LOW4 (inst.operands[2].reg);
16025 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16026 inst.instruction |= (et.type == NT_unsigned) << 24;
16027 inst.instruction |= neon_logbits (size) << 20;
16028
16029 neon_dp_fixup (&inst);
16030 }
16031
/* Encode a lengthening dyadic operation (Qd = Dn op Dm).  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16040
/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16048
16049 static void
16050 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16051 {
16052 if (inst.operands[2].isscalar)
16053 {
16054 struct neon_type_el et = neon_check_type (3, NS_QDS,
16055 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16056 NEON_ENCODE (SCALAR, inst);
16057 neon_mul_mac (et, et.type == NT_unsigned);
16058 }
16059 else
16060 {
16061 struct neon_type_el et = neon_check_type (3, NS_QDD,
16062 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16063 NEON_ENCODE (INTEGER, inst);
16064 neon_mixed_length (et, et.size);
16065 }
16066 }
16067
/* Encode VMLAL/VMLSL and friends (long MAC, vector or by-scalar).  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16073
/* Encode a widening dyadic operation (Qd = Qn op Dm).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16081
/* Encode a narrowing dyadic operation (Dd = Qn op Qm); the size
   encoded is that of the narrow (destination) elements.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16092
/* Encode VQDMULL and friends (saturating long multiply, vector or
   by-scalar; signed 16/32-bit elements only).  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16098
/* Encode VMULL, including the polynomial forms (P8 and, with the ARMv8
   crypto extension, P64).  The by-scalar form is handled by the long
   MAC encoder.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16130
16131 static void
16132 do_neon_ext (void)
16133 {
16134 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16135 struct neon_type_el et = neon_check_type (3, rs,
16136 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16137 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16138
16139 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16140 _("shift out of range"));
16141 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16142 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16143 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16144 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16145 inst.instruction |= LOW4 (inst.operands[2].reg);
16146 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16147 inst.instruction |= neon_quad (rs) << 6;
16148 inst.instruction |= imm << 8;
16149
16150 neon_dp_fixup (&inst);
16151 }
16152
/* Encode VREV16/VREV32/VREV64.  The reversal-region width is implied
   by bits <8:7> of the base opcode; the element size must be strictly
   smaller than the region being reversed.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16169
/* Encode VDUP: duplicate either a vector scalar (Dm[x]) or an ARM core
   register into all lanes of a D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The lane index and size share the imm4 field: the index sits
	 above the size's leading one bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16220
16221 /* VMOV has particularly many variations. It can be one of:
16222 0. VMOV<c><q> <Qd>, <Qm>
16223 1. VMOV<c><q> <Dd>, <Dm>
16224 (Register operations, which are VORR with Rm = Rn.)
16225 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16226 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16227 (Immediate loads.)
16228 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16229 (ARM register to scalar.)
16230 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16231 (Two ARM registers to vector.)
16232 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16233 (Scalar to ARM register.)
16234 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16235 (Vector to two ARM registers.)
16236 8. VMOV.F32 <Sd>, <Sm>
16237 9. VMOV.F64 <Dd>, <Dm>
16238 (VFP register moves.)
16239 10. VMOV.F32 <Sd>, #imm
16240 11. VMOV.F64 <Dd>, #imm
16241 (VFP float immediate load.)
16242 12. VMOV <Rd>, <Sm>
16243 (VFP single to ARM reg.)
16244 13. VMOV <Sd>, <Rm>
16245 (ARM reg to VFP single.)
16246 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16247 (Two ARM regs to two VFP singles.)
16248 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16249 (Two VFP singles to two ARM regs.)
16250
16251 These cases can be disambiguated using neon_select_shape, except cases 1/9
16252 and 3/11 which depend on the operand type too.
16253
16254 All the encoded bits are hardcoded by this function.
16255
16256 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16257 Cases 5, 7 may be used with VFPv2 and above.
16258
16259 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16260 can specify a type where it doesn't make sense to, and is ignored). */
16261
/* Encode one of the many VMOV variants enumerated in the comment above.
   The operand shape disambiguates most cases; the DD and DI shapes also
   depend on the element type (F64 selects the VFP forms, cases 9/11).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ,  NS_DD,  NS_QI,  NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* VFP double-precision register move (case 9).  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rm == Rn, so the source register fills both fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Opcode bits selecting the transfer size; the lane index is
	   merged in above the size bits.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Opcode bits selecting size and sign-extension of the scalar.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16510
/* Encode V{R}SHR (rounding right shift by immediate).  A shift of #0
   is assembled as the equivalent VMOV.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded as et.size - imm in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16531
/* Encode VMOV.F16 <Hd>, <Hm> (half-precision register move; requires
   the ARMv8 VFP extension).  The instruction is unconditional.  */

static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  do_vfp_sp_monadic ();

  inst.is_neon = 1;
  /* 0xf in the condition field marks the instruction unconditional.  */
  inst.instruction |= 0xf0000000;
}
16546
/* Encode VMOVL (lengthening move, Qd = Dm).  The element size is
   encoded as a one-hot byte count in bits <21:19>.  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
16556
/* Encode VTRN (vector transpose).  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16566
/* Encode VZIP/VUZP.  The 32-bit D-register forms are aliases of
   VTRN.32 and are re-dispatched accordingly.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16582
/* Encode the saturating absolute/negate two-register operations
   (VQABS/VQNEG): signed 8/16/32-bit element types only.  */
static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16591
/* Encode the pairwise-long operations (e.g. VPADDL/VPADAL): 8/16/32-bit
   signed or unsigned elements.  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16601
/* Encode the reciprocal-estimate family (e.g. VRECPE/VRSQRTE): element type
   is float (F16/F32) or U32; bit 8 marks the float variant.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16611
/* Encode VCLS (count leading sign bits): signed 8/16/32-bit elements.  */
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16620
/* Encode VCLZ (count leading zeros): integer 8/16/32-bit elements.  */
static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16629
/* Encode VCNT (population count): 8-bit integer elements only.  */
static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16638
/* Encode VSWP (register swap): untyped, so no size field is set (-1).  */
static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
16645
/* Encode VTBL/VTBX (table lookup): a destination D register, a list of one
   to four table D registers, and an index D register.  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded as length minus one, in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16669
/* Encode VLDM/VSTM (and the decrement-before VLDMDB/VSTMDB forms).
   Single-precision register lists are handed to the VFP handler; otherwise
   the base register, writeback flag, first D register and transfer length
   are packed here.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Transfer length in words: two per D register.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16699
/* Encode VLDR/VSTR via the equivalent VFP mnemonics (flds/fsts for single
   precision, fldd/fstd for double), diagnosing deprecated or unpredictable
   use of PC as the base register of a store.  */
static void
do_neon_ldr_str (void)
{
  /* The L (load) bit is bit 20 of the initial bitmask.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16736
16737 /* "interleave" version also handles non-interleaving register VLD1/VST1
16738 instructions. */
16739
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the alignment value (held in bits [15:8] of the address operand's
     imm) onto the 2-bit align field; the larger alignments are only legal
     for particular register-list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16805
16806 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16807 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16808 otherwise. The variable arguments are a list of pairs of legal (size, align)
16809 values, terminated with -1. */
16810
16811 static int
16812 neon_alignment_bit (int size, int align, int *do_alignment, ...)
16813 {
16814 va_list ap;
16815 int result = FAIL, thissize, thisalign;
16816
16817 if (!inst.operands[1].immisalign)
16818 {
16819 *do_alignment = 0;
16820 return SUCCESS;
16821 }
16822
16823 va_start (ap, do_alignment);
16824
16825 do
16826 {
16827 thissize = va_arg (ap, int);
16828 if (thissize == -1)
16829 break;
16830 thisalign = va_arg (ap, int);
16831
16832 if (size == thissize && align == thisalign)
16833 result = SUCCESS;
16834 }
16835 while (result != SUCCESS);
16836
16837 va_end (ap);
16838
16839 if (result == SUCCESS)
16840 *do_alignment = 1;
16841 else
16842 first_error (_("unsupported alignment for instruction"));
16843
16844 return result;
16845 }
16846
/* Encode single-lane VLD<n>/VST<n>: one element transferred to/from one lane
   of each listed register.  Validates list length, lane index, stride and
   alignment, then packs the alignment, lane and size fields.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16931
16932 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16933
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 distinguishes the two-register list form.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the size encoding 0b11
	   instead of logbits(32).  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment-present flag is bit 4.  */
  inst.instruction |= do_alignment << 4;
}
17006
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17009
17010 static void
17011 do_neon_ldx_stx (void)
17012 {
17013 if (inst.operands[1].isreg)
17014 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17015
17016 switch (NEON_LANE (inst.operands[0].imm))
17017 {
17018 case NEON_INTERLEAVE_LANES:
17019 NEON_ENCODE (INTERLV, inst);
17020 do_neon_ld_st_interleave ();
17021 break;
17022
17023 case NEON_ALL_LANES:
17024 NEON_ENCODE (DUP, inst);
17025 if (inst.instruction == N_INV)
17026 {
17027 first_error ("only loads support such operands");
17028 break;
17029 }
17030 do_neon_ld_dup ();
17031 break;
17032
17033 default:
17034 NEON_ENCODE (LANE, inst);
17035 do_neon_ld_st_lane ();
17036 }
17037
17038 /* L bit comes from bit mask. */
17039 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17040 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17041 inst.instruction |= inst.operands[1].reg << 16;
17042
17043 if (inst.operands[1].postind)
17044 {
17045 int postreg = inst.operands[1].imm & 0xf;
17046 constraint (!inst.operands[1].immisreg,
17047 _("post-index must be a register"));
17048 constraint (postreg == 0xd || postreg == 0xf,
17049 _("bad register for post-index"));
17050 inst.instruction |= postreg;
17051 }
17052 else
17053 {
17054 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17055 constraint (inst.reloc.exp.X_op != O_constant
17056 || inst.reloc.exp.X_add_number != 0,
17057 BAD_ADDR_MODE);
17058
17059 if (inst.operands[1].writeback)
17060 {
17061 inst.instruction |= 0xd;
17062 }
17063 else
17064 inst.instruction |= 0xf;
17065 }
17066
17067 if (thumb_mode)
17068 inst.instruction |= 0xf9000000;
17069 else
17070 inst.instruction |= 0xf4000000;
17071 }
17072
17073 /* FP v8. */
/* Common encoder for three-operand FP v8 scalar instructions.  RS gives the
   operand shape: half (NS_HHH), single (NS_FFF) or double (NS_DDD)
   precision.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Double-precision forms set bit 8.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* These encodings are unconditional: top nibble is 0xF.  */
  inst.instruction |= 0xf0000000;
}
17101
/* Encode the FP v8 conditional-select instructions (VSEL<cond>); they may
   not appear inside an IT block.  */
static void
do_vsel (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
17110
/* Encode the FP v8 maximum/minimum-number instructions (VMAXNM family):
   try the scalar VFP form first, then fall back to the Neon vector form
   (requires ARMv8 Neon).  */
static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17124
/* Underlying encoder for the VRINT family; MODE selects the rounding
   variant.  Scalar operands (H/F/D shapes) take the VFP encoding; vector
   operands fall through to the Neon encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Double precision sets bit 8.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* The rounding mode is encoded in bits [9:7].  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17216
/* VRINTX: round to integral, signalling inexactness (mode X).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
17222
/* VRINTZ: round towards zero (mode Z).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
17228
/* VRINTR: round using the current FPSCR rounding mode (mode R).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
17234
/* VRINTA: round to nearest, ties away from zero (mode A).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
17240
/* VRINTN: round to nearest, ties to even (mode N).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
17246
/* VRINTP: round towards plus infinity (mode P).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
17252
/* VRINTM: round towards minus infinity (mode M).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17258
17259 /* Crypto v1 instructions. */
/* Common encoder for the two-register crypto instructions (AES*, SHA1H,
   SHA*SU0/1).  ELTTYPE is the required element type; OP goes into bits
   [7:6], or is omitted when -1.  Not permitted in IT blocks.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Unconditional top byte: T32 and A32 forms differ.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17284
/* Common encoder for the three-register crypto instructions (SHA1*,
   SHA256*).  U is the U bit; 8 << OP is passed as the size argument of
   neon_three_same.  Not permitted in IT blocks.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17299
/* AESE: AES single-round encryption.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
17305
/* AESD: AES single-round decryption.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
17311
/* AESMC: AES mix columns.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
17317
/* AESIMC: AES inverse mix columns.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
17323
/* SHA1C: SHA1 hash update (choose).  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
17329
/* SHA1P: SHA1 hash update (parity).  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
17335
/* SHA1M: SHA1 hash update (majority).  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
17341
/* SHA1SU0: SHA1 schedule update 0.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
17347
/* SHA256H: SHA256 hash update (part 1).  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
17353
/* SHA256H2: SHA256 hash update (part 2).  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
17359
/* SHA256SU1: SHA256 schedule update 1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
17365
/* SHA1H: SHA1 fixed rotate (two-register form, no OP field).  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
17371
/* SHA1SU1: SHA1 schedule update 1.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
17377
/* SHA256SU0: SHA256 schedule update 0.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17383
/* Common encoder for CRC32 instructions.  POLY selects the polynomial
   (0: CRC32, 1: CRC32C) and SZ the operand size (0: byte, 1: halfword,
   2: word).  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Field positions differ between the ARM and Thumb encodings.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (and, in Thumb, SP) operands are unpredictable: warn, don't error.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17403
/* CRC32B: CRC-32 polynomial, byte operand.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
17409
/* CRC32H: CRC-32 polynomial, halfword operand.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
17415
/* CRC32W: CRC-32 polynomial, word operand.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
17421
/* CRC32CB: CRC-32C polynomial, byte operand.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
17427
/* CRC32CH: CRC-32C polynomial, halfword operand.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
17433
/* CRC32CW: CRC-32C polynomial, word operand.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17439
17440 \f
17441 /* Overall per-instruction processing. */
17442
17443 /* We need to be able to fix up arbitrary expressions in some statements.
17444 This is so that we can handle symbols that are an arbitrary distance from
17445 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17446 which returns part of an address in a form which will be valid for
17447 a data instruction. We do this by pushing the expression into a symbol
17448 in the expr_section, and creating a fix for that. */
17449
/* Create a fix of SIZE bytes at offset WHERE in FRAG, against expression
   EXP with relocation RELOC; PC_REL is non-zero for pc-relative fixes.  */
static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	reloc)
{
  fixS *	new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Any other expression: wrap it in a symbol and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17503
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the branch target into a symbol and offset for frag_var.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit the 16-bit form now; relaxation may later grow it by THUMB_SIZE.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17535
17536 /* Write a 32-bit thumb instruction to buf. */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  /* T32 storage order: high halfword first, then low halfword.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17543
/* Emit the fully-encoded instruction held in INST, together with any fixup
   and DWARF line info.  STR is the source line, used only in the error
   message.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* An 8-byte ARM instruction: the 32-bit pattern is emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17590
17591 static char *
17592 output_it_inst (int cond, int mask, char * to)
17593 {
17594 unsigned long instruction = 0xbf00;
17595
17596 mask &= 0xf;
17597 instruction |= mask;
17598 instruction |= cond << 4;
17599
17600 if (to == NULL)
17601 {
17602 to = frag_more (2);
17603 #ifdef OBJ_ELF
17604 dwarf2_emit_insn (2);
17605 #endif
17606 }
17607
17608 md_number_to_chars (to, instruction, 2);
17609
17610 return to;
17611 }
17612
/* Tag values used in struct asm_opcode's tag field; they describe how (and
   where) a conditional affix may attach to the mnemonic during lookup.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17646
17647 /* Subroutine of md_assemble, responsible for looking up the primary
17648 opcode from the mnemonic the user wrote. STR points to the
17649 beginning of the mnemonic.
17650
17651 This is not simply a hash table lookup, because of conditional
17652 variants. Most instructions have conditional variants, which are
17653 expressed with a _conditional affix_ to the mnemonic. If we were
17654 to encode each conditional variant as a literal string in the opcode
17655 table, it would have approximately 20,000 entries.
17656
17657 Most mnemonics take this affix as a suffix, and in unified syntax,
17658 'most' is upgraded to 'all'. However, in the divided syntax, some
17659 instructions take the affix as an infix, notably the s-variants of
17660 the arithmetic instructions. Of those instructions, all but six
17661 have the infix appear after the third character of the mnemonic.
17662
17663 Accordingly, the algorithm for looking up primary opcodes given
17664 an identifier is:
17665
17666 1. Look up the identifier in the opcode table.
17667 If we find a match, go to step U.
17668
17669 2. Look up the last two characters of the identifier in the
17670 conditions table. If we find a match, look up the first N-2
17671 characters of the identifier in the opcode table. If we
17672 find a match, go to step CE.
17673
17674 3. Look up the fourth and fifth characters of the identifier in
17675 the conditions table. If we find a match, extract those
17676 characters from the identifier, and look up the remaining
17677 characters in the opcode table. If we find a match, go
17678 to step CM.
17679
17680 4. Fail.
17681
17682 U. Examine the tag field of the opcode structure, in case this is
17683 one of the six instructions with its conditional infix in an
17684 unusual place. If it is, the tag tells us where to find the
17685 infix; look it up in the conditions table and set inst.cond
17686 accordingly. Otherwise, this is an unconditional instruction.
17687 Again set inst.cond accordingly. Return the opcode structure.
17688
17689 CE. Examine the tag field to make sure this is an instruction that
17690 should receive a conditional suffix. If it is not, fail.
17691 Otherwise, set inst.cond from the suffix we already looked up,
17692 and return the opcode structure.
17693
17694 CM. Examine the tag field to make sure this is an instruction that
17695 should receive a conditional infix after the third character.
17696 If it is not, fail. Otherwise, undo the edits to the current
17697 line of input and proceed as for case CE. */
17698
17699 static const struct asm_opcode *
17700 opcode_lookup (char **str)
17701 {
17702 char *end, *base;
17703 char *affix;
17704 const struct asm_opcode *opcode;
17705 const struct asm_cond *cond;
17706 char save[2];
17707
17708 /* Scan up to the end of the mnemonic, which must end in white space,
17709 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17710 for (base = end = *str; *end != '\0'; end++)
17711 if (*end == ' ' || *end == '.')
17712 break;
17713
17714 if (end == base)
17715 return NULL;
17716
17717 /* Handle a possible width suffix and/or Neon type suffix. */
17718 if (end[0] == '.')
17719 {
17720 int offset = 2;
17721
17722 /* The .w and .n suffixes are only valid if the unified syntax is in
17723 use. */
17724 if (unified_syntax && end[1] == 'w')
17725 inst.size_req = 4;
17726 else if (unified_syntax && end[1] == 'n')
17727 inst.size_req = 2;
17728 else
17729 offset = 0;
17730
17731 inst.vectype.elems = 0;
17732
17733 *str = end + offset;
17734
17735 if (end[offset] == '.')
17736 {
17737 /* See if we have a Neon type suffix (possible in either unified or
17738 non-unified ARM syntax mode). */
17739 if (parse_neon_type (&inst.vectype, str) == FAIL)
17740 return NULL;
17741 }
17742 else if (end[offset] != '\0' && end[offset] != ' ')
17743 return NULL;
17744 }
17745 else
17746 *str = end;
17747
17748 /* Look for unaffixed or special-case affixed mnemonic. */
17749 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17750 end - base);
17751 if (opcode)
17752 {
17753 /* step U */
17754 if (opcode->tag < OT_odd_infix_0)
17755 {
17756 inst.cond = COND_ALWAYS;
17757 return opcode;
17758 }
17759
17760 if (warn_on_deprecated && unified_syntax)
17761 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17762 affix = base + (opcode->tag - OT_odd_infix_0);
17763 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17764 gas_assert (cond);
17765
17766 inst.cond = cond->value;
17767 return opcode;
17768 }
17769
17770 /* Cannot have a conditional suffix on a mnemonic of less than two
17771 characters. */
17772 if (end - base < 3)
17773 return NULL;
17774
17775 /* Look for suffixed mnemonic. */
17776 affix = end - 2;
17777 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17778 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17779 affix - base);
17780 if (opcode && cond)
17781 {
17782 /* step CE */
17783 switch (opcode->tag)
17784 {
17785 case OT_cinfix3_legacy:
17786 /* Ignore conditional suffixes matched on infix only mnemonics. */
17787 break;
17788
17789 case OT_cinfix3:
17790 case OT_cinfix3_deprecated:
17791 case OT_odd_infix_unc:
17792 if (!unified_syntax)
17793 return 0;
17794 /* Fall through. */
17795
17796 case OT_csuffix:
17797 case OT_csuffixF:
17798 case OT_csuf_or_in3:
17799 inst.cond = cond->value;
17800 return opcode;
17801
17802 case OT_unconditional:
17803 case OT_unconditionalF:
17804 if (thumb_mode)
17805 inst.cond = cond->value;
17806 else
17807 {
17808 /* Delayed diagnostic. */
17809 inst.error = BAD_COND;
17810 inst.cond = COND_ALWAYS;
17811 }
17812 return opcode;
17813
17814 default:
17815 return NULL;
17816 }
17817 }
17818
17819 /* Cannot have a usual-position infix on a mnemonic of less than
17820 six characters (five would be a suffix). */
17821 if (end - base < 6)
17822 return NULL;
17823
17824 /* Look for infixed mnemonic in the usual position. */
17825 affix = base + 3;
17826 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17827 if (!cond)
17828 return NULL;
17829
17830 memcpy (save, affix, 2);
17831 memmove (affix, affix + 2, (end - affix) - 2);
17832 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17833 (end - base) - 2);
17834 memmove (affix + 2, affix, (end - affix) - 2);
17835 memcpy (affix, save, 2);
17836
17837 if (opcode
17838 && (opcode->tag == OT_cinfix3
17839 || opcode->tag == OT_cinfix3_deprecated
17840 || opcode->tag == OT_csuf_or_in3
17841 || opcode->tag == OT_cinfix3_legacy))
17842 {
17843 /* Step CM. */
17844 if (warn_on_deprecated && unified_syntax
17845 && (opcode->tag == OT_cinfix3
17846 || opcode->tag == OT_cinfix3_deprecated))
17847 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17848
17849 inst.cond = cond->value;
17850 return opcode;
17851 }
17852
17853 return NULL;
17854 }
17855
17856 /* This function generates an initial IT instruction, leaving its block
17857 virtually open for the new instructions. Eventually,
17858 the mask will be updated by now_it_add_mask () each time
17859 a new instruction needs to be included in the IT block.
17860 Finally, the block is closed with close_automatic_it_block ().
17861 The block closure can be requested either from md_assemble (),
17862 a tencode (), or due to a label hook. */
17863
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; now_it_add_mask ()
     rewrites the low bits as further instructions join the block.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  /* The IT instruction itself is Thumb code; make sure the mapping
     symbol state reflects that before emitting it.  */
  mapping_state (MAP_THUMB);
  /* Keep a handle on the emitted IT instruction so its mask can be
     patched in place later (see now_it_add_mask / output_it_inst).  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17876
17877 /* Close an automatic IT block.
17878 See comments in new_automatic_it_block (). */
17879
17880 static void
17881 close_automatic_it_block (void)
17882 {
17883 now_it.mask = 0x10;
17884 now_it.block_length = 0;
17885 }
17886
17887 /* Update the mask of the current automatically-generated IT
17888 instruction. See comments in new_automatic_it_block (). */
17889
17890 static void
17891 now_it_add_mask (int cond)
17892 {
17893 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17894 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17895 | ((bitvalue) << (nbit)))
17896 const int resulting_bit = (cond & 1);
17897
17898 now_it.mask &= 0xf;
17899 now_it.mask = SET_BIT_VALUE (now_it.mask,
17900 resulting_bit,
17901 (5 - now_it.block_length));
17902 now_it.mask = SET_BIT_VALUE (now_it.mask,
17903 1,
17904 ((5 - now_it.block_length) - 1) );
17905 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17906
17907 #undef CLEAR_BIT
17908 #undef SET_BIT_VALUE
17909 }
17910
17911 /* The IT blocks handling machinery is accessed through the these functions:
17912 it_fsm_pre_encode () from md_assemble ()
17913 set_it_insn_type () optional, from the tencode functions
17914 set_it_insn_type_last () ditto
17915 in_it_block () ditto
17916 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
17918
17919 Rationale:
17920 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17921 initializing the IT insn type with a generic initial value depending
17922 on the inst.condition.
17923 2) During the tencode function, two things may happen:
17924 a) The tencode function overrides the IT insn type by
17925 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17926 b) The tencode function queries the IT block state by
17927 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17928
17929 Both set_it_insn_type and in_it_block run the internal FSM state
17930 handling function (handle_it_state), because: a) setting the IT insn
17931 type may incur in an invalid state (exiting the function),
17932 and b) querying the state requires the FSM to be updated.
17933 Specifically we want to avoid creating an IT block for conditional
17934 branches, so it_fsm_pre_encode is actually a guess and we can't
17935 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17937 Because of this, if set_it_insn_type and in_it_block have to be used,
17938 set_it_insn_type has to be called first.
17939
17940 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17941 determines the insn IT type depending on the inst.cond code.
17942 When a tencode () routine encodes an instruction that can be
17943 either outside an IT block, or, in the case of being inside, has to be
17944 the last one, set_it_insn_type_last () will determine the proper
17945 IT instruction type based on the inst.cond code. Otherwise,
17946 set_it_insn_type can be called for overriding that logic or
17947 for covering other cases.
17948
17949 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17951 still queried. Instead, if the FSM determines that the state should
17952 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17953 after the tencode () function: that's what it_fsm_post_encode () does.
17954
17955 Since in_it_block () calls the state handling function to get an
17956 updated state, an error may occur (due to invalid insns combination).
17957 In that case, inst.error is set.
17958 Therefore, inst.error has to be checked after the execution of
17959 the tencode () routine.
17960
17961 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17962 any pending state change (if any) that didn't take place in
17963 handle_it_state () as explained above. */
17964
17965 static void
17966 it_fsm_pre_encode (void)
17967 {
17968 if (inst.cond != COND_ALWAYS)
17969 inst.it_insn_type = INSIDE_IT_INSN;
17970 else
17971 inst.it_insn_type = OUTSIDE_IT_INSN;
17972
17973 now_it.state_handled = 0;
17974 }
17975
/* IT state FSM handling function.  Runs at most once per instruction
   (callers check now_it.state_handled first); advances now_it according
   to the current state and inst.it_insn_type.  Returns SUCCESS, or FAIL
   with inst.error set on an invalid instruction/state combination.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn in unified syntax (unless implicit IT covers ARM)
		 because the same code would need one for Thumb.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      /* Thumb mode: open an IT block implicitly when allowed
		 and supported, otherwise reject the conditional insn.  */
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* A hand-written IT instruction: switch to manual tracking.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Close the block if it is full or this condition cannot be
	     expressed within the current one; otherwise extend it.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Advance the mask to the next slot of the block.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18139
/* A pattern/mask pair describing a class of 16-bit Thumb encodings,
   plus a printable description used in deprecation diagnostics
   (see it_fsm_post_encode and depr_it_insns).  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required bit values, after masking.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Class name for the warning message.  */
};
18146
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Matched against inst.instruction by it_fsm_post_encode;
   terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18161
18162 static void
18163 it_fsm_post_encode (void)
18164 {
18165 int is_last;
18166
18167 if (!now_it.state_handled)
18168 handle_it_state ();
18169
18170 if (now_it.insn_cond
18171 && !now_it.warn_deprecated
18172 && warn_on_deprecated
18173 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18174 {
18175 if (inst.instruction >= 0x10000)
18176 {
18177 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18178 "deprecated in ARMv8"));
18179 now_it.warn_deprecated = TRUE;
18180 }
18181 else
18182 {
18183 const struct depr_insn_mask *p = depr_it_insns;
18184
18185 while (p->mask != 0)
18186 {
18187 if ((inst.instruction & p->mask) == p->pattern)
18188 {
18189 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18190 "of the following class are deprecated in ARMv8: "
18191 "%s"), p->description);
18192 now_it.warn_deprecated = TRUE;
18193 break;
18194 }
18195
18196 ++p;
18197 }
18198 }
18199
18200 if (now_it.block_length > 1)
18201 {
18202 as_tsktsk (_("IT blocks containing more than one conditional "
18203 "instruction are deprecated in ARMv8"));
18204 now_it.warn_deprecated = TRUE;
18205 }
18206 }
18207
18208 is_last = (now_it.mask == 0x10);
18209 if (is_last)
18210 {
18211 now_it.state = OUTSIDE_IT_BLOCK;
18212 now_it.mask = 0;
18213 }
18214 }
18215
18216 static void
18217 force_automatic_it_block_close (void)
18218 {
18219 if (now_it.state == AUTOMATIC_IT_BLOCK)
18220 {
18221 close_automatic_it_block ();
18222 now_it.state = OUTSIDE_IT_BLOCK;
18223 now_it.mask = 0;
18224 }
18225 }
18226
18227 static int
18228 in_it_block (void)
18229 {
18230 if (!now_it.state_handled)
18231 handle_it_state ();
18232
18233 return now_it.state != OUTSIDE_IT_BLOCK;
18234 }
18235
18236 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18237 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18238 here, hence the "known" in the function name. */
18239
18240 static bfd_boolean
18241 known_t32_only_insn (const struct asm_opcode *opcode)
18242 {
18243 /* Original Thumb-1 wide instruction. */
18244 if (opcode->tencode == do_t_blx
18245 || opcode->tencode == do_t_branch23
18246 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18247 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18248 return TRUE;
18249
18250 /* Wide-only instruction added to ARMv8-M Baseline. */
18251 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18252 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18253 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18254 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18255 return TRUE;
18256
18257 return FALSE;
18258 }
18259
18260 /* Whether wide instruction variant can be used if available for a valid OPCODE
18261 in ARCH. */
18262
18263 static bfd_boolean
18264 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18265 {
18266 if (known_t32_only_insn (opcode))
18267 return TRUE;
18268
18269 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18270 of variant T3 of B.W is checked in do_t_branch. */
18271 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18272 && opcode->tencode == do_t_branch)
18273 return TRUE;
18274
18275 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18276 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18277 && opcode->tencode == do_t_mov_cmp
18278 /* Make sure CMP instruction is not affected. */
18279 && opcode->aencode == do_mov)
18280 return TRUE;
18281
18282 /* Wide instruction variants of all instructions with narrow *and* wide
18283 variants become available with ARMv6t2. Other opcodes are either
18284 narrow-only or wide-only and are thus available if OPCODE is valid. */
18285 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18286 return TRUE;
18287
18288 /* OPCODE with narrow only instruction variant or wide variant not
18289 available. */
18290 return FALSE;
18291 }
18292
/* Assemble one instruction from STR: look up the mnemonic, validate it
   against the selected CPU/architecture and the current instruction
   set (ARM vs Thumb), parse the operands, run the encoding routine and
   the IT-block FSM, then queue the result via output_inst.  All
   diagnostics are reported with as_bad / as_tsktsk.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings in the 0xe800-0xffff range would collide
	     with the first halfword of a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18482
18483 static void
18484 check_it_blocks_finished (void)
18485 {
18486 #ifdef OBJ_ELF
18487 asection *sect;
18488
18489 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18490 if (seg_info (sect)->tc_segment_info_data.current_it.state
18491 == MANUAL_IT_BLOCK)
18492 {
18493 as_warn (_("section '%s' finished with an open IT block."),
18494 sect->name);
18495 }
18496 #else
18497 if (now_it.state == MANUAL_IT_BLOCK)
18498 as_warn (_("file finished with an open IT block."));
18499 #endif
18500 }
18501
18502 /* Various frobbings of labels and their addresses. */
18503
void
arm_start_line_hook (void)
{
  /* Forget the previously recorded label; arm_frob_label sets it again
     when a new label is defined, and md_assemble re-anchors it.  */
  last_label_seen = NULL;
}
18509
/* Hook run whenever a label is defined.  Records the label so
   md_assemble can re-anchor it, tags it with the current ARM/Thumb
   and interworking state, closes any open automatic IT block, and
   optionally marks it as a Thumb function entry point.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word .Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18568
18569 bfd_boolean
18570 arm_data_in_code (void)
18571 {
18572 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18573 {
18574 *input_line_pointer = '/';
18575 input_line_pointer += 5;
18576 *input_line_pointer = 0;
18577 return TRUE;
18578 }
18579
18580 return FALSE;
18581 }
18582
18583 char *
18584 arm_canonicalize_symbol_name (char * name)
18585 {
18586 int len;
18587
18588 if (thumb_mode && (len = strlen (name)) > 5
18589 && streq (name + len - 5, "/data"))
18590 *(name + len - 5) = 0;
18591
18592 return name;
18593 }
18594 \f
18595 /* Table of all register names defined by default. The user can
18596 define additional names with .req. Note that all register names
18597 should appear in both upper and lowercase variants. Some registers
18598 also have mixed-case names. */
18599
18600 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18601 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18602 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18603 #define REGSET(p,t) \
18604 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18605 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18606 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18607 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18608 #define REGSETH(p,t) \
18609 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18610 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18611 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18612 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18613 #define REGSET2(p,t) \
18614 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18615 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18616 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18617 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18618 #define SPLRBANK(base,bank,t) \
18619 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18620 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18621 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18622 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18623 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18624 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18625
/* The master table of register names recognised by the assembler.
   Each REGDEF/REGNUM/REGSET entry pairs one spelling with a register
   number and a reg_type tag (RN = core, RNB = banked, CP/CN =
   coprocessor number/register, FN = FPA, VFS/VFD = VFP single/double,
   NQ = Neon quad, VFC = VFP control, and the Maverick/iWMMXt/XScale
   tags -- REGDEF and the tags are defined above this excerpt).  Both
   lower- and upper-case spellings are entered explicitly, so lookup
   can remain case-sensitive.  NOTE(review): entry order is presumably
   irrelevant to lookup (hash keyed by name), but duplicates would
   collide -- keep names unique when extending.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The value field is 512|(slot<<16), with
     SPSR_BIT or'd in for the SPSR forms; the SPLRBANK groups above use
     the 768|... variant.  These encodings feed the banked-register
     MRS/MSR handling elsewhere in this file.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* The helper macros are strictly local to the table above.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18743
/* Table of all PSR suffixes (the "<flags>" part of e.g. "msr cpsr_fc").
   Bare "CPSR" and "SPSR" are handled within psr_required_here.  Each
   value is an OR of the PSR_f/PSR_s/PSR_x/PSR_c field masks; every
   permutation of each combination is listed explicitly so the suffix
   can be matched with a single table lookup regardless of the order
   the programmer wrote the letters in.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18822
18823 /* Table of V7M psr names. */
18824 static const struct asm_psr v7m_psrs[] =
18825 {
18826 {"apsr", 0x0 }, {"APSR", 0x0 },
18827 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18828 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18829 {"psr", 0x3 }, {"PSR", 0x3 },
18830 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18831 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18832 {"epsr", 0x6 }, {"EPSR", 0x6 },
18833 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18834 {"msp", 0x8 }, {"MSP", 0x8 },
18835 {"psp", 0x9 }, {"PSP", 0x9 },
18836 {"msplim", 0xa }, {"MSPLIM", 0xa },
18837 {"psplim", 0xb }, {"PSPLIM", 0xb },
18838 {"primask", 0x10}, {"PRIMASK", 0x10},
18839 {"basepri", 0x11}, {"BASEPRI", 0x11},
18840 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18841 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18842 {"control", 0x14}, {"CONTROL", 0x14},
18843 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18844 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18845 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18846 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18847 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18848 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18849 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18850 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18851 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18852 };
18853
/* Table of all shift-in-operand names, mapping each accepted spelling
   (both cases) to its internal SHIFT_* code.  Note that "asl" is kept
   as a synonym for LSL.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18864
/* Table of all explicit relocation names (the "(reloc)" prefixes such
   as ":got:" / "(GOT)" in operands), mapping each spelling in both
   cases to its BFD relocation code.  ELF only.
   NOTE(review): unlike the surrounding tables this one is not const --
   presumably deliberate; confirm whether any consumer writes to it
   before adding const.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18889
/* Table of all conditional affixes, mapping each suffix spelling to
   its 4-bit condition-code field value.  0xF is not defined as a
   condition code.  Synonyms share a value: hs == cs (0x2) and
   cc == ul == lo (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18909
/* Emit both the lower-case (L) and upper-case (U) spellings of one
   barrier option.  CODE is the 4-bit option field of the DSB/DMB
   family; FEAT is the architecture feature the option requires
   (the ...LD load-only options are ARMv8).  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier option names accepted by dsb/dmb (and friends),
   including the legacy "sh"/"un"/... spellings which alias the
   ish/nsh encodings.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18935
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   Each OPSn(a,...) expands to a brace-enclosed operand-parse-code
   array initializer, prefixing OP_ onto every argument.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18960
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Throughout: OP is the ARM opcode and
   TOP the Thumb opcode (both hex digits, with 0x pasted on by the
   macro where noted); NOPS/OPS select an OPSn operand list; AE/TE name
   the ARM and Thumb encoding functions (do_ is pasted on).  The
   current ARM_VARIANT / THUMB_VARIANT #defines supply the required
   architecture features.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
19012
/* ARM-only variants of all the above (Thumb opcode 0 / NULL Thumb
   encoder).  N.B. CE and CL take an already-quoted mnemonic string,
   whereas C3 stringizes its argument itself (#mnem) -- compare the
   CE("swp",...) and C3(swpb,...) uses in the table below.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM one with the 0xE (AL) condition pasted on.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19042
/* Build one table entry whose name is M1, an optional condition infix
   M2 (stringized), then M3.  When M2 is empty (sizeof (#m2) == 1) the
   entry is the unconditional spelling; otherwise the tag records
   where the infix sits inside the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the full set of legacy odd-infix conditional spellings:
   the bare mnemonic plus one entry per condition suffix (0xF is not a
   condition, so "al" is the last).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
19068
/* ARM-only unconditional entries; UE keeps the 0xE condition field,
   UF bears 0xF.  Both stringize their mnemonic argument.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets table entries written with a literal 0 encoding function expand
   do_##ae to do_0, i.e. plain 0 (no encoder).  */
#define do_0 0
19112
19113 static const struct asm_opcode insns[] =
19114 {
19115 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19116 #define THUMB_VARIANT & arm_ext_v4t
19117 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19118 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19119 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19120 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19121 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19122 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19123 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19124 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19125 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19126 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19127 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19128 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19129 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19130 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19131 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19132 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19133
19134 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19135 for setting PSR flag bits. They are obsolete in V6 and do not
19136 have Thumb equivalents. */
19137 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19138 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19139 CL("tstp", 110f000, 2, (RR, SH), cmp),
19140 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19141 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19142 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19143 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19144 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19145 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19146
19147 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19148 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19149 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19150 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19151
19152 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19153 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19154 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19155 OP_RRnpc),
19156 OP_ADDRGLDR),ldst, t_ldst),
19157 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19158
19159 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19160 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19161 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19162 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19163 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19164 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19165
19166 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19167 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19168 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19169 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19170
19171 /* Pseudo ops. */
19172 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19173 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19174 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19175 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19176
19177 /* Thumb-compatibility pseudo ops. */
19178 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19179 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19180 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19181 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19182 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19183 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19184 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19185 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19186 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19187 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19188 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19189 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19190
19191 /* These may simplify to neg. */
19192 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19193 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19194
19195 #undef THUMB_VARIANT
19196 #define THUMB_VARIANT & arm_ext_v6
19197
19198 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19199
19200 /* V1 instructions with no Thumb analogue prior to V6T2. */
19201 #undef THUMB_VARIANT
19202 #define THUMB_VARIANT & arm_ext_v6t2
19203
19204 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19205 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19206 CL("teqp", 130f000, 2, (RR, SH), cmp),
19207
19208 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19209 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19210 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19211 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19212
19213 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19214 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19215
19216 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19217 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19218
19219 /* V1 instructions with no Thumb analogue at all. */
19220 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19221 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19222
19223 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19224 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19225 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19226 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19227 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19228 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19229 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19230 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19231
19232 #undef ARM_VARIANT
19233 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19234 #undef THUMB_VARIANT
19235 #define THUMB_VARIANT & arm_ext_v4t
19236
19237 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19238 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19239
19240 #undef THUMB_VARIANT
19241 #define THUMB_VARIANT & arm_ext_v6t2
19242
19243 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19244 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19245
19246 /* Generic coprocessor instructions. */
19247 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19248 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19249 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19250 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19251 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19252 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19253 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19254
19255 #undef ARM_VARIANT
19256 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19257
19258 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19259 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19260
19261 #undef ARM_VARIANT
19262 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19263 #undef THUMB_VARIANT
19264 #define THUMB_VARIANT & arm_ext_msr
19265
19266 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19267 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19268
19269 #undef ARM_VARIANT
19270 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19271 #undef THUMB_VARIANT
19272 #define THUMB_VARIANT & arm_ext_v6t2
19273
19274 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19275 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19276 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19277 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19278 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19279 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19280 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19281 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19282
19283 #undef ARM_VARIANT
19284 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19285 #undef THUMB_VARIANT
19286 #define THUMB_VARIANT & arm_ext_v4t
19287
19288 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19289 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19290 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19291 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19292 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19293 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19294
19295 #undef ARM_VARIANT
19296 #define ARM_VARIANT & arm_ext_v4t_5
19297
19298 /* ARM Architecture 4T. */
19299 /* Note: bx (and blx) are required on V5, even if the processor does
19300 not support Thumb. */
19301 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19302
19303 #undef ARM_VARIANT
19304 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19305 #undef THUMB_VARIANT
19306 #define THUMB_VARIANT & arm_ext_v5t
19307
19308 /* Note: blx has 2 variants; the .value coded here is for
19309 BLX(2). Only this variant has conditional execution. */
19310 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19311 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19312
19313 #undef THUMB_VARIANT
19314 #define THUMB_VARIANT & arm_ext_v6t2
19315
19316 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19317 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19318 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19319 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19320 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19321 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19322 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19323 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19324
19325 #undef ARM_VARIANT
19326 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19327 #undef THUMB_VARIANT
19328 #define THUMB_VARIANT & arm_ext_v5exp
19329
19330 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19331 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19332 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19333 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19334
19335 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19336 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19337
19338 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19339 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19340 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19341 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19342
19343 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19344 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19345 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19346 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19347
19348 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19349 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19350
19351 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19352 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19353 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19354 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19355
19356 #undef ARM_VARIANT
19357 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19358 #undef THUMB_VARIANT
19359 #define THUMB_VARIANT & arm_ext_v6t2
19360
19361 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19362 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19363 ldrd, t_ldstd),
19364 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19365 ADDRGLDRS), ldrd, t_ldstd),
19366
19367 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19368 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19369
19370 #undef ARM_VARIANT
19371 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19372
19373 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19374
19375 #undef ARM_VARIANT
19376 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19377 #undef THUMB_VARIANT
19378 #define THUMB_VARIANT & arm_ext_v6
19379
19380 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19381 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19382 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19383 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19384 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19385 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19386 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19387 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19388 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19389 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19390
19391 #undef THUMB_VARIANT
19392 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19393
19394 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19395 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19396 strex, t_strex),
19397 #undef THUMB_VARIANT
19398 #define THUMB_VARIANT & arm_ext_v6t2
19399
19400 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19401 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19402
19403 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19404 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19405
19406 /* ARM V6 not included in V7M. */
19407 #undef THUMB_VARIANT
19408 #define THUMB_VARIANT & arm_ext_v6_notm
19409 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19410 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19411 UF(rfeib, 9900a00, 1, (RRw), rfe),
19412 UF(rfeda, 8100a00, 1, (RRw), rfe),
19413 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19414 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19415 UF(rfefa, 8100a00, 1, (RRw), rfe),
19416 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19417 UF(rfeed, 9900a00, 1, (RRw), rfe),
19418 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19419 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19420 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19421 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19422 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19423 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19424 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19425 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19426 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19427 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19428
19429 /* ARM V6 not included in V7M (eg. integer SIMD). */
19430 #undef THUMB_VARIANT
19431 #define THUMB_VARIANT & arm_ext_v6_dsp
19432 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19433 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19434 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19435 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19436 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19437 /* Old name for QASX. */
19438 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19439 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19440 /* Old name for QSAX. */
19441 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19442 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19443 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19444 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19445 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19446 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19447 /* Old name for SASX. */
19448 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19449 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19450 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19451 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19452 /* Old name for SHASX. */
19453 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19454 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19455 /* Old name for SHSAX. */
19456 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19457 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19458 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19459 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19460 /* Old name for SSAX. */
19461 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19462 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19463 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19464 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19465 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19466 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19467 /* Old name for UASX. */
19468 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19469 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19470 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19471 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19472 /* Old name for UHASX. */
19473 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19474 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19475 /* Old name for UHSAX. */
19476 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19477 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19478 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19479 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19480 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19481 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19482 /* Old name for UQASX. */
19483 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19484 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19485 /* Old name for UQSAX. */
19486 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19487 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19488 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19489 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19490 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19491 /* Old name for USAX. */
19492 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19493 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19494 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19495 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19496 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19497 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19498 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19499 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19500 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19501 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19502 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19503 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19504 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19505 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19506 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19507 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19508 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19509 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19510 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19511 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19512 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19513 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19514 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19515 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19516 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19517 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19518 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19519 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19520 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19521 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19522 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19523 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19524 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19525 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19526
19527 #undef ARM_VARIANT
19528 #define ARM_VARIANT & arm_ext_v6k
19529 #undef THUMB_VARIANT
19530 #define THUMB_VARIANT & arm_ext_v6k
19531
19532 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19533 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19534 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19535 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19536
19537 #undef THUMB_VARIANT
19538 #define THUMB_VARIANT & arm_ext_v6_notm
19539 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19540 ldrexd, t_ldrexd),
19541 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19542 RRnpcb), strexd, t_strexd),
19543
19544 #undef THUMB_VARIANT
19545 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19546 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19547 rd_rn, rd_rn),
19548 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19549 rd_rn, rd_rn),
19550 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19551 strex, t_strexbh),
19552 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19553 strex, t_strexbh),
19554 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19555
19556 #undef ARM_VARIANT
19557 #define ARM_VARIANT & arm_ext_sec
19558 #undef THUMB_VARIANT
19559 #define THUMB_VARIANT & arm_ext_sec
19560
19561 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19562
19563 #undef ARM_VARIANT
19564 #define ARM_VARIANT & arm_ext_virt
19565 #undef THUMB_VARIANT
19566 #define THUMB_VARIANT & arm_ext_virt
19567
19568 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19569 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19570
19571 #undef ARM_VARIANT
19572 #define ARM_VARIANT & arm_ext_pan
19573 #undef THUMB_VARIANT
19574 #define THUMB_VARIANT & arm_ext_pan
19575
19576 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19577
19578 #undef ARM_VARIANT
19579 #define ARM_VARIANT & arm_ext_v6t2
19580 #undef THUMB_VARIANT
19581 #define THUMB_VARIANT & arm_ext_v6t2
19582
19583 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19584 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19585 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19586 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19587
19588 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19589 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19590
19591 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19592 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19593 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19594 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19595
19596 #undef THUMB_VARIANT
19597 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19598 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19599 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19600
19601 /* Thumb-only instructions. */
19602 #undef ARM_VARIANT
19603 #define ARM_VARIANT NULL
19604 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19605 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19606
19607 /* ARM does not really have an IT instruction, so always allow it.
19608 The opcode is copied from Thumb in order to allow warnings in
19609 -mimplicit-it=[never | arm] modes. */
19610 #undef ARM_VARIANT
19611 #define ARM_VARIANT & arm_ext_v1
19612 #undef THUMB_VARIANT
19613 #define THUMB_VARIANT & arm_ext_v6t2
19614
19615 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19616 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19617 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19618 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19619 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19620 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19621 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19622 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19623 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19624 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19625 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19626 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19627 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19628 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19629 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19630 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19631 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19632 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19633
19634 /* Thumb2 only instructions. */
19635 #undef ARM_VARIANT
19636 #define ARM_VARIANT NULL
19637
19638 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19639 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19640 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19641 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19642 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19643 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19644
19645 /* Hardware division instructions. */
19646 #undef ARM_VARIANT
19647 #define ARM_VARIANT & arm_ext_adiv
19648 #undef THUMB_VARIANT
19649 #define THUMB_VARIANT & arm_ext_div
19650
19651 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19652 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19653
19654 /* ARM V6M/V7 instructions. */
19655 #undef ARM_VARIANT
19656 #define ARM_VARIANT & arm_ext_barrier
19657 #undef THUMB_VARIANT
19658 #define THUMB_VARIANT & arm_ext_barrier
19659
19660 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19661 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19662 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19663
19664 /* ARM V7 instructions. */
19665 #undef ARM_VARIANT
19666 #define ARM_VARIANT & arm_ext_v7
19667 #undef THUMB_VARIANT
19668 #define THUMB_VARIANT & arm_ext_v7
19669
19670 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19671 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19672
19673 #undef ARM_VARIANT
19674 #define ARM_VARIANT & arm_ext_mp
19675 #undef THUMB_VARIANT
19676 #define THUMB_VARIANT & arm_ext_mp
19677
19678 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19679
19680 /* AArchv8 instructions. */
19681 #undef ARM_VARIANT
19682 #define ARM_VARIANT & arm_ext_v8
19683
19684 /* Instructions shared between armv8-a and armv8-m. */
19685 #undef THUMB_VARIANT
19686 #define THUMB_VARIANT & arm_ext_atomics
19687
19688 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19689 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19690 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19691 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19692 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19693 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19694 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19695 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19696 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19697 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19698 stlex, t_stlex),
19699 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19700 stlex, t_stlex),
19701 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19702 stlex, t_stlex),
19703 #undef THUMB_VARIANT
19704 #define THUMB_VARIANT & arm_ext_v8
19705
19706 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19707 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19708 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19709 ldrexd, t_ldrexd),
19710 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19711 strexd, t_strexd),
19712 /* ARMv8 T32 only. */
19713 #undef ARM_VARIANT
19714 #define ARM_VARIANT NULL
19715 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19716 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19717 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19718
19719 /* FP for ARMv8. */
19720 #undef ARM_VARIANT
19721 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19722 #undef THUMB_VARIANT
19723 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19724
19725 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19726 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19727 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19728 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19729 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19730 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19731 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19732 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19733 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19734 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19735 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19736 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19737 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19738 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19739 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19740 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19741 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19742
19743 /* Crypto v1 extensions. */
19744 #undef ARM_VARIANT
19745 #define ARM_VARIANT & fpu_crypto_ext_armv8
19746 #undef THUMB_VARIANT
19747 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19748
19749 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19750 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19751 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19752 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19753 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19754 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19755 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19756 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19757 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19758 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19759 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19760 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19761 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19762 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19763
19764 #undef ARM_VARIANT
19765 #define ARM_VARIANT & crc_ext_armv8
19766 #undef THUMB_VARIANT
19767 #define THUMB_VARIANT & crc_ext_armv8
19768 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19769 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19770 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19771 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19772 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19773 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19774
19775 /* ARMv8.2 RAS extension. */
19776 #undef ARM_VARIANT
19777 #define ARM_VARIANT & arm_ext_ras
19778 #undef THUMB_VARIANT
19779 #define THUMB_VARIANT & arm_ext_ras
19780 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19781
19782 #undef ARM_VARIANT
19783 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19784 #undef THUMB_VARIANT
19785 #define THUMB_VARIANT NULL
19786
19787 cCE("wfs", e200110, 1, (RR), rd),
19788 cCE("rfs", e300110, 1, (RR), rd),
19789 cCE("wfc", e400110, 1, (RR), rd),
19790 cCE("rfc", e500110, 1, (RR), rd),
19791
19792 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19793 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19794 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19795 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19796
19797 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19798 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19799 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19800 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19801
19802 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19803 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19804 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19805 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19806 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19807 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19808 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19809 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19810 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19811 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19812 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19813 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19814
19815 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19816 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19817 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19818 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19819 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19820 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19821 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19822 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19823 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19824 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19825 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19826 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19827
19828 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19829 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19830 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19831 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19832 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19833 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19834 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19835 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19836 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19837 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19838 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19839 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19840
19841 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19842 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19843 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19844 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19845 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19846 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19847 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19848 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19849 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19850 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19851 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19852 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19853
19854 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19855 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19856 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19857 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19858 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19859 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19860 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19861 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19862 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19863 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19864 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19865 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19866
19867 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19868 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19869 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19870 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19871 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19872 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19873 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19874 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19875 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19876 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19877 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19878 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19879
19880 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19881 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19882 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19883 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19884 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19885 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19886 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19887 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19888 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19889 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19890 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19891 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19892
19893 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19894 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19895 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19896 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19897 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19898 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19899 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19900 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19901 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19902 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19903 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19904 	cCL("expez",	e788160, 2, (RF, RF_IF),      rd_rm),	/* Was misspelt "expdz", duplicating the entry above; e788160 is the extended-precision, round-towards-zero variant.  */
19905
19906 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19907 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19908 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19909 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19910 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19911 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19912 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19913 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19914 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19915 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19916 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19917 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19918
19919 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19920 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19921 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19922 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19923 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19924 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19925 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19926 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19927 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19928 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19929 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19930 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19931
19932 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19933 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19934 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19935 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19936 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19937 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19938 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19939 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19940 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19941 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19942 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19943 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19944
19945 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19946 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19947 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19948 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19949 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19950 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19951 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19952 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19953 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19954 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19955 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19956 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19957
19958 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19959 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19960 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19961 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19962 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19963 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19964 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19965 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19966 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19967 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19968 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19969 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19970
19971 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19972 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19973 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19974 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19975 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19976 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19977 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19978 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19979 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19980 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19981 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19982 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19983
19984 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19985 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19986 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19987 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19988 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19989 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19990 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19991 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19992 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19993 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19994 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19995 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19996
19997 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19998 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19999 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20000 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20001 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20002 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20003 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20004 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20005 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20006 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20007 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20008 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20009
20010 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20011 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20012 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20013 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20014 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20015 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20016 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20017 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20018 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20019 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20020 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20021 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20022
20023 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20024 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20025 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20026 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20027 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20028 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20029 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20030 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20031 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20032 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20033 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20034 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20035
20036 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20037 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20038 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20039 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20040 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20041 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20042 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20043 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20044 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20045 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20046 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20047 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20048
20049 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20050 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20051 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20052 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20053 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20054 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20055 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20056 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20057 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20058 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20059 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20060 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20061
20062 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20063 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20064 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20065 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20066 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20067 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20068 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20069 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20070 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20071 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20072 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20073 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20074
20075 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20076 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20077 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20078 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20079 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20080 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20081 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20082 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20083 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20084 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20085 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20086 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20087
20088 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20089 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20090 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20091 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20092 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20093 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20094 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20095 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20096 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20097 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20098 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20099 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20100
20101 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20102 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20103 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20104 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20105 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20106 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20107 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20108 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20109 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20110 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20111 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20112 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20113
20114 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20115 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20116 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20117 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20118 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20119 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20120 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20121 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20122 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20123 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20124 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20125 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20126
20127 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20128 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20129 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20130 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20131 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20132 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20133 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20134 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20135 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20136 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20137 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20138 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20139
20140 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20141 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20142 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20143 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20144 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20145 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20146 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20147 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20148 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20149 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20150 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20152
20153 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20154 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20155 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20156 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20157 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20158 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20159 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20160 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20161 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20162 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20163 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20164 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20165
20166 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20167 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20168 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20169 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20170 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20171 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20172 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20173 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20174 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20175 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20176 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20177 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20178
20179 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20180 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20181 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20182 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20183
20184 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20185 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20186 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20187 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20188 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20189 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20190 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20191 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20192 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20193 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20194 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20195 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20196
20197 /* The implementation of the FIX instruction is broken on some
20198 assemblers, in that it accepts a precision specifier as well as a
20199 rounding specifier, despite the fact that this is meaningless.
20200 To be more compatible, we accept it as well, though of course it
20201 does not set any bits. */
20202 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20203 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20204 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20205 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20206 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20207 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20208 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20209 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20210 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20211 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20212 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20213 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20214 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20215
20216 /* Instructions that were new with the real FPA, call them V2. */
20217 #undef ARM_VARIANT
20218 #define ARM_VARIANT & fpu_fpa_ext_v2
20219
20220 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20221 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20222 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20223 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20224 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20225 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20226
20227 #undef ARM_VARIANT
20228 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20229
20230 /* Moves and type conversions. */
20231 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20232 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20233 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20234 cCE("fmstat", ef1fa10, 0, (), noargs),
20235 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20236 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20237 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20238 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20239 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20240 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20241 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20242 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20243 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20244 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20245
20246 /* Memory operations. */
20247 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20248 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20249 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20250 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20251 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20252 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20253 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20254 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20255 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20256 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20257 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20258 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20259 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20260 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20261 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20262 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20263 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20264 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20265
20266 /* Monadic operations. */
20267 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20268 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20269 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20270
20271 /* Dyadic operations. */
20272 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20273 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20274 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20275 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20276 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20277 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20278 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20279 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20280 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20281
20282 /* Comparisons. */
20283 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20284 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20285 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20286 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20287
20288 /* Double precision load/store are still present on single precision
20289 implementations. */
20290 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20291 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20292 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20293 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20294 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20295 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20296 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20297 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20298 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20299 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20300
20301 #undef ARM_VARIANT
20302 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20303
20304 /* Moves and type conversions. */
20305 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20306 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20307 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20308 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20309 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20310 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20311 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20312 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20313 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20314 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20315 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20316 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20317 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20318
20319 /* Monadic operations. */
20320 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20321 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20322 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20323
20324 /* Dyadic operations. */
20325 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20326 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20327 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20328 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20329 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20330 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20331 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20332 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20333 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20334
20335 /* Comparisons. */
20336 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20337 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20338 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20339 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20340
20341 #undef ARM_VARIANT
20342 #define ARM_VARIANT & fpu_vfp_ext_v2
20343
20344 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20345 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20346 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20347 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20348
20349 /* Instructions which may belong to either the Neon or VFP instruction sets.
20350 Individual encoder functions perform additional architecture checks. */
20351 #undef ARM_VARIANT
20352 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20353 #undef THUMB_VARIANT
20354 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20355
20356 /* These mnemonics are unique to VFP. */
20357 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20358 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20359 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20360 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20361 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20362 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20363 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20364 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20365 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20366 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20367
20368 /* Mnemonics shared by Neon and VFP. */
20369 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20370 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20371 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20372
20373 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20374 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20375
20376 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20377 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20378
20379 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20380 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20381 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20382 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20383 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20384 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20385 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20386 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20387
20388 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20389 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20390 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20391 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20392
20393
20394 /* NOTE: All VMOV encoding is special-cased! */
20395 NCE(vmov, 0, 1, (VMOV), neon_mov),
20396 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20397
20398 #undef ARM_VARIANT
20399 #define ARM_VARIANT & arm_ext_fp16
20400 #undef THUMB_VARIANT
20401 #define THUMB_VARIANT & arm_ext_fp16
20402 /* New instructions added from v8.2, allowing the extraction and insertion of
20403 the upper 16 bits of a 32-bit vector register. */
20404 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20405 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20406
20407 #undef THUMB_VARIANT
20408 #define THUMB_VARIANT & fpu_neon_ext_v1
20409 #undef ARM_VARIANT
20410 #define ARM_VARIANT & fpu_neon_ext_v1
20411
20412 /* Data processing with three registers of the same length. */
20413 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20414 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20415 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20416 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20417 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20418 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20419 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20420 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20421 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20422 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20423 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20424 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20425 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20426 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20427 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20428 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20429 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20430 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20431 /* If not immediate, fall back to neon_dyadic_i64_su.
20432 shl_imm should accept I8 I16 I32 I64,
20433 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20434 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20435 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20436 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20437 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20438 /* Logic ops, types optional & ignored. */
20439 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20440 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20441 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20442 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20443 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20444 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20445 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20446 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20447 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20448 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20449 /* Bitfield ops, untyped. */
20450 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20451 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20452 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20453 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20454 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20455 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20456 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20457 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20458 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20459 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20460 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20461 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20462 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20463 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20464 back to neon_dyadic_if_su. */
20465 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20466 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20467 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20468 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20469 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20470 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20471 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20472 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20473 /* Comparison. Type I8 I16 I32 F32. */
20474 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20475 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20476 /* As above, D registers only. */
20477 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20478 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20479 /* Int and float variants, signedness unimportant. */
20480 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20481 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20482 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20483 /* Add/sub take types I8 I16 I32 I64 F32. */
20484 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20485 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20486 /* vtst takes sizes 8, 16, 32. */
20487 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20488 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20489 /* VMUL takes I8 I16 I32 F32 P8. */
20490 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20491 /* VQD{R}MULH takes S16 S32. */
20492 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20493 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20494 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20495 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20496 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20497 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20498 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20499 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20500 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20501 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20502 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20503 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20504 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20505 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20506 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20507 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20508 /* ARM v8.1 extension. */
20509 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20510 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20511 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20512 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20513
20514 /* Two address, int/float. Types S8 S16 S32 F32. */
20515 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20516 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20517
20518 /* Data processing with two registers and a shift amount. */
20519 /* Right shifts, and variants with rounding.
20520 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20521 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20522 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20523 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20524 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20525 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20526 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20527 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20528 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20529 /* Shift and insert. Sizes accepted 8 16 32 64. */
20530 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20531 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20532 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20533 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20534 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20535 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20536 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20537 /* Right shift immediate, saturating & narrowing, with rounding variants.
20538 Types accepted S16 S32 S64 U16 U32 U64. */
20539 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20540 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20541 /* As above, unsigned. Types accepted S16 S32 S64. */
20542 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20543 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20544 /* Right shift narrowing. Types accepted I16 I32 I64. */
20545 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20546 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20547 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20548 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20549 /* CVT with optional immediate for fixed-point variant. */
20550 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20551
20552 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20553 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20554
20555 /* Data processing, three registers of different lengths. */
20556 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20557 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20558 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20559 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20560 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20561 /* If not scalar, fall back to neon_dyadic_long.
20562 Vector types as above, scalar types S16 S32 U16 U32. */
20563 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20564 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20565 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20566 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20567 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20568 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20569 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20570 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20571 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20572 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20573 /* Saturating doubling multiplies. Types S16 S32. */
20574 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20575 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20576 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20577 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20578 S16 S32 U16 U32. */
20579 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20580
20581 /* Extract. Size 8. */
20582 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20583 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20584
20585 /* Two registers, miscellaneous. */
20586 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20587 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20588 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20589 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20590 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20591 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20592 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20593 /* Vector replicate. Sizes 8 16 32. */
20594 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20595 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20596 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20597 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20598 /* VMOVN. Types I16 I32 I64. */
20599 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20600 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20601 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20602 /* VQMOVUN. Types S16 S32 S64. */
20603 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20604 /* VZIP / VUZP. Sizes 8 16 32. */
20605 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20606 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20607 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20608 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20609 /* VQABS / VQNEG. Types S8 S16 S32. */
20610 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20611 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20612 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20613 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20614 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20615 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20616 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20617 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20618 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20619 /* Reciprocal estimates. Types U32 F16 F32. */
20620 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20621 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20622 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20623 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20624 /* VCLS. Types S8 S16 S32. */
20625 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20626 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20627 /* VCLZ. Types I8 I16 I32. */
20628 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20629 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20630 /* VCNT. Size 8. */
20631 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20632 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20633 /* Two address, untyped. */
20634 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20635 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20636 /* VTRN. Sizes 8 16 32. */
20637 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20638 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20639
20640 /* Table lookup. Size 8. */
20641 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20642 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20643
20644 #undef THUMB_VARIANT
20645 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20646 #undef ARM_VARIANT
20647 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20648
20649 /* Neon element/structure load/store. */
20650 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20651 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20652 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20653 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20654 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20655 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20656 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20657 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20658
20659 #undef THUMB_VARIANT
20660 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20661 #undef ARM_VARIANT
20662 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20663 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20664 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20665 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20666 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20667 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20668 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20669 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20670 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20671 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20672
20673 #undef THUMB_VARIANT
20674 #define THUMB_VARIANT & fpu_vfp_ext_v3
20675 #undef ARM_VARIANT
20676 #define ARM_VARIANT & fpu_vfp_ext_v3
20677
20678 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20679 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20680 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20681 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20682 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20683 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20684 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20685 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20686 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20687
20688 #undef ARM_VARIANT
20689 #define ARM_VARIANT & fpu_vfp_ext_fma
20690 #undef THUMB_VARIANT
20691 #define THUMB_VARIANT & fpu_vfp_ext_fma
20692 /* Mnemonics shared by Neon and VFP. These are included in the
20693 VFP FMA variant; NEON and VFP FMA always includes the NEON
20694 FMA instructions. */
20695 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20696 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20697 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20698 the v form should always be used. */
20699 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20700 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20701 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20702 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20703 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20704 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20705
20706 #undef THUMB_VARIANT
20707 #undef ARM_VARIANT
20708 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20709
20710 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20711 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20712 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20713 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20714 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20715 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20716 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20717 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20718
20719 #undef ARM_VARIANT
20720 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20721
20722 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20723 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20724 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20725 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20726 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20727 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20728 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20729 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20730 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20731 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20732 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20733 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20734 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20735 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20736 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20737 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20738 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20739 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20740 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20741 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20742 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20743 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20744 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20745 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20746 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20747 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20748 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20749 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20750 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20751 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20752 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20753 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20754 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20755 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20756 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20757 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20758 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20759 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20760 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20761 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20762 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20763 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20764 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20765 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20766 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20767 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20768 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20769 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20770 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20771 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20772 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20773 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20774 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20775 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20776 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20777 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20778 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20779 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20780 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20781 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20782 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20783 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20784 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20785 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20786 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20787 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20788 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20789 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20790 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20791 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20792 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20793 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20794 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20795 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20796 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20797 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20798 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20799 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20800 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20801 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20802 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20803 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20804 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20805 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20806 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20807 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20808 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20809 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20810 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20811 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20812 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20813 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20814 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20815 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20816 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20817 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20818 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20819 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20820 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20821 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20822 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20823 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20824 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20825 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20826 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20827 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20828 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20829 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20830 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20831 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20832 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20833 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20834 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20835 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20836 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20837 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20838 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20839 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20840 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20841 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20842 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20843 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20844 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20845 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20846 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20847 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20848 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20849 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20850 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20851 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20852 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20853 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20854 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20855 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20856 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20857 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20858 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20859 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20860 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20861 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20862 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20863 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20864 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20865 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20866 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20867 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20868 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20869 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20870 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20871 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20872 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20873 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20874 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20875 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20876 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20877 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20878 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20879 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20880 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20881 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20882 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20883 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20884
20885 #undef ARM_VARIANT
20886 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20887
20888 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20889 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20890 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20891 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20892 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20893 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20894 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20895 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20896 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20897 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20898 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20899 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20900 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20901 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20902 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20903 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20904 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20905 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20906 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20907 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20908 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20909 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20910 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20911 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20912 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20913 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20914 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20915 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20916 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20917 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20918 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20919 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20920 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20921 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20922 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20923 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20924 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20925 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20926 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20927 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20928 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20929 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20930 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20931 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20932 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20933 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20934 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20935 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20936 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20937 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20938 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20939 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20940 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20941 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20942 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20943 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20944 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20945
20946 #undef ARM_VARIANT
20947 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20948
20949 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20950 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20951 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20952 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20953 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20954 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20955 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20956 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20957 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20958 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20959 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20960 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20961 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20962 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20963 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20964 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20965 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20966 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20967 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20968 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20969 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20970 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20971 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20972 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20973 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20974 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20975 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20976 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20977 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20978 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20979 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20980 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20981 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20982 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20983 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20984 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20985 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20986 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20987 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20988 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20989 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20990 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20991 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20992 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20993 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20994 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20995 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20996 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20997 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20998 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20999 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21000 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21001 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21002 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21003 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21004 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21005 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21006 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21007 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21008 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21009 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21010 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21011 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21012 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21013 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21014 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21015 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21016 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21017 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21018 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21019 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21020 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21021 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21022 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21023 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21024 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21025
21026 /* ARMv8-M instructions. */
21027 #undef ARM_VARIANT
21028 #define ARM_VARIANT NULL
21029 #undef THUMB_VARIANT
21030 #define THUMB_VARIANT & arm_ext_v8m
21031 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21032 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21033 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21034 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21035 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21036 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21037 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21038
21039 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21040 instructions behave as nop if no VFP is present. */
21041 #undef THUMB_VARIANT
21042 #define THUMB_VARIANT & arm_ext_v8m_main
21043 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21044 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21045 };
21046 #undef ARM_VARIANT
21047 #undef THUMB_VARIANT
21048 #undef TCE
21049 #undef TUE
21050 #undef TUF
21051 #undef TCC
21052 #undef cCE
21053 #undef cCL
21054 #undef C3E
21055 #undef CE
21056 #undef CM
21057 #undef UE
21058 #undef UF
21059 #undef UT
21060 #undef NUF
21061 #undef nUF
21062 #undef NCE
21063 #undef nCE
21064 #undef OPS0
21065 #undef OPS1
21066 #undef OPS2
21067 #undef OPS3
21068 #undef OPS4
21069 #undef OPS5
21070 #undef OPS6
21071 #undef do_0
21072 \f
21073 /* MD interface: bits in the object file. */
21074
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */
21081
21082 void
21083 md_number_to_chars (char * buf, valueT val, int n)
21084 {
21085 if (target_big_endian)
21086 number_to_chars_bigendian (buf, val, n);
21087 else
21088 number_to_chars_littleendian (buf, val, n);
21089 }
21090
21091 static valueT
21092 md_chars_to_number (char * buf, int n)
21093 {
21094 valueT result = 0;
21095 unsigned char * where = (unsigned char *) buf;
21096
21097 if (target_big_endian)
21098 {
21099 while (n--)
21100 {
21101 result <<= 8;
21102 result |= (*where++ & 255);
21103 }
21104 }
21105 else
21106 {
21107 while (n--)
21108 {
21109 result <<= 8;
21110 result |= (where[n] & 255);
21111 }
21112 }
21113
21114 return result;
21115 }
21116
21117 /* MD interface: Sections. */
21118
21119 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21120 that an rs_machine_dependent frag may reach. */
21121
21122 unsigned int
21123 arm_frag_max_var (fragS *fragp)
21124 {
21125 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21126 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21127
21128 Note that we generate relaxable instructions even for cases that don't
21129 really need it, like an immediate that's a trivial constant. So we're
21130 overestimating the instruction size for some of those cases. Rather
21131 than putting more intelligence here, it would probably be better to
21132 avoid generating a relaxation frag in the first place when it can be
21133 determined up front that a short instruction will suffice. */
21134
21135 gas_assert (fragp->fr_type == rs_machine_dependent);
21136 return INSN_SIZE;
21137 }
21138
21139 /* Estimate the size of a frag before relaxing. Assume everything fits in
21140 2 bytes. */
21141
21142 int
21143 md_estimate_size_before_relax (fragS * fragp,
21144 segT segtype ATTRIBUTE_UNUSED)
21145 {
21146 fragp->fr_var = 2;
21147 return 2;
21148 }
21149
/* Convert a machine dependent frag.  Replace the placeholder narrow
   opcode laid down during assembly with its final 16-bit (fr_var == 2)
   or 32-bit (fr_var == 4) Thumb encoding and emit the matching fixup.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The placeholder instruction sits at the end of the frag's fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy the register fields of the old narrow encoding into the
	     wide one.  Top-nibble values 4 and 9 are the forms that carry
	     only a destination register (in bits 8-10); the others also
	     have a base register in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the ldr_pc2 form is PC-relative; the rest use an explicit
	 base register.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset of the narrow form.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs keep the register field (bits 8-10) in place;
	     cmp/cmn shift it up by 8 into the wide Rn position.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Transfer the condition code field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* NOTE(review): bit 20 looks like the S bit of the wide
	     encoding, selecting which T32 immediate relocation is
	     applicable -- confirm against the Thumb-2 encodings.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The converted instruction now counts as fixed content.  */
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21323
21324 /* Return the size of a relaxable immediate operand instruction.
21325 SHIFT and SIZE specify the form of the allowable immediate. */
21326 static int
21327 relax_immediate (fragS *fragp, int size, int shift)
21328 {
21329 offsetT offset;
21330 offsetT mask;
21331 offsetT low;
21332
21333 /* ??? Should be able to do better than this. */
21334 if (fragp->fr_symbol)
21335 return 4;
21336
21337 low = (1 << shift) - 1;
21338 mask = (1 << (shift + size)) - (1 << shift);
21339 offset = fragp->fr_offset;
21340 /* Force misaligned offsets to 32-bit variant. */
21341 if (offset & low)
21342 return 4;
21343 if (offset & ~mask)
21344 return 4;
21345 return 2;
21346 }
21347
/* Get the address of a symbol during relaxation.  STRETCH is the
   accumulated size change of frags already processed on this pass;
   it is used to estimate where a not-yet-reached symbol will land.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* An intervening alignment frag can absorb the whole
		 stretch, in which case later frags do not move.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the stretch if the symbol's frag actually follows.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21397
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base address is this instruction + 4 rounded down to a word
     boundary (the Thumb PC reads as word-aligned here).  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form only reaches forward up to 1020 bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
21424
21425 /* Return the size of a relaxable add/sub immediate instruction. */
21426 static int
21427 relax_addsub (fragS *fragp, asection *sec)
21428 {
21429 char *buf;
21430 int op;
21431
21432 buf = fragp->fr_literal + fragp->fr_fix;
21433 op = bfd_get_16(sec->owner, buf);
21434 if ((op & 0xf) == ((op >> 4) & 0xf))
21435 return relax_immediate (fragp, 8, 0);
21436 else
21437 return relax_immediate (fragp, 3, 0);
21438 }
21439
21440 /* Return TRUE iff the definition of symbol S could be pre-empted
21441 (overridden) at link or load time. */
21442 static bfd_boolean
21443 symbol_preemptible (symbolS *s)
21444 {
21445 /* Weak symbols can always be pre-empted. */
21446 if (S_IS_WEAK (s))
21447 return TRUE;
21448
21449 /* Non-global symbols cannot be pre-empted. */
21450 if (! S_IS_EXTERNAL (s))
21451 return FALSE;
21452
21453 #ifdef OBJ_ELF
21454 /* In ELF, a global symbol can be marked protected, or private. In that
21455 case it can't be pre-empted (other definitions in the same link unit
21456 would violate the ODR). */
21457 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21458 return FALSE;
21459 #endif
21460
21461 /* Other global symbols might be pre-empted. */
21462 return TRUE;
21463 }
21464
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A symbol whose definition can be replaced at link time cannot be
     reached with a narrow, fixed-range branch.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Thumb branches are relative to this instruction's address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
21501
21502
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;

  /* Dispatch on the mnemonic recorded at assembly time.  The second
     and third arguments to relax_immediate are the width and scale of
     the narrow encoding's immediate field.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21581
21582 /* Round up a section size to the appropriate boundary. */
21583
21584 valueT
21585 md_section_align (segT segment ATTRIBUTE_UNUSED,
21586 valueT size)
21587 {
21588 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21589 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21590 {
21591 /* For a.out, force the section size to be aligned. If we don't do
21592 this, BFD will align it for us, but it will not write out the
21593 final bytes of the section. This may be a bug in BFD, but it is
21594 easier to fix it here since that is how the other a.out targets
21595 work. */
21596 int align;
21597
21598 align = bfd_get_section_alignment (stdoutput, segment);
21599 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21600 }
21601 #endif
21602
21603 return size;
21604 }
21605
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings, indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb frag: prefer narrow + wide Thumb-2 NOPs when the selected
	 CPU (or, with no CPU selected, no architecture) has v6t2.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM frag: use the architected NOP hint on v6k and later,
	 otherwise the traditional "mov r0, r0".  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* First pad with zero bytes (marked as data via a mapping symbol on
     ELF) until the remaining count is a multiple of the NOP size.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      /* If an odd number of 2-byte units remain, emit one narrow NOP
	 so the rest can be covered by 4-byte wide NOPs.  */
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21725
21726 /* Called from md_do_align. Used to create an alignment
21727 frag in a code section. */
21728
21729 void
21730 arm_frag_align_code (int n, int max)
21731 {
21732 char * p;
21733
21734 /* We assume that there will never be a requirement
21735 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21736 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21737 {
21738 char err_msg[128];
21739
21740 sprintf (err_msg,
21741 _("alignments greater than %d bytes not supported in .text sections."),
21742 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21743 as_fatal ("%s", err_msg);
21744 }
21745
21746 p = frag_var (rs_align_code,
21747 MAX_MEM_FOR_RS_ALIGN_CODE,
21748 1,
21749 (relax_substateT) max,
21750 (symbolS *) NULL,
21751 (offsetT) n,
21752 (char *) NULL);
21753 *p = 0;
21754 }
21755
21756 /* Perform target specific initialisation of a frag.
21757 Note - despite the name this initialisation is not done when the frag
21758 is created, but only when its type is assigned. A frag can be created
21759 and used a long time before its type is set, so beware of assuming that
21760 this initialisationis performed first. */
21761
21762 #ifndef OBJ_ELF
/* Non-ELF targets only need to remember the instruction-set state of
   the frag; no mapping symbols are emitted.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21769
21770 #else /* OBJ_ELF is defined. */
/* ELF targets also place mapping symbols (via mapping_state_2) so that
   consumers can distinguish ARM code, Thumb code and data.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  int frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Strip the MODE_RECORDED marker, leaving the ARM/Thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21799
21800 /* When we change sections we need to issue a new mapping symbol. */
21801
21802 void
21803 arm_elf_change_section (void)
21804 {
21805 /* Link an unlinked unwind index table section to the .text section. */
21806 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21807 && elf_linked_to_section (now_seg) == NULL)
21808 elf_linked_to_section (now_seg) = text_section;
21809 }
21810
21811 int
21812 arm_elf_section_type (const char * str, size_t len)
21813 {
21814 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21815 return SHT_ARM_EXIDX;
21816
21817 return -1;
21818 }
21819 \f
21820 /* Code to deal with unwinding tables. */
21821
21822 static void add_unwind_adjustsp (offsetT);
21823
21824 /* Generate any deferred unwind frame offset. */
21825
21826 static void
21827 flush_pending_unwind (void)
21828 {
21829 offsetT offset;
21830
21831 offset = unwind.pending_offset;
21832 unwind.pending_offset = 0;
21833 if (offset != 0)
21834 add_unwind_adjustsp (offset);
21835 }
21836
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  /* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE steps.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the bytes least-significant first; since the whole list is
     kept reversed, this preserves op[0]-before-op[1] order in the
     final emitted table.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
21867
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive grows the frame).  The byte values used here are the
   ARM EHABI frame-unwinding instructions.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: the first (0x3f) covers the maximal 0x100
	 increment, the second the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      /* Emit maximal 0x100-byte decrements (0x7f), then the remainder
	 in one short decrement opcode (0x40 | amount).  */
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21929
21930 /* Finish the list of unwind opcodes for this function. */
21931 static void
21932 finish_unwind_opcodes (void)
21933 {
21934 valueT op;
21935
21936 if (unwind.fp_used)
21937 {
21938 /* Adjust sp as necessary. */
21939 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21940 flush_pending_unwind ();
21941
21942 /* After restoring sp from the frame pointer. */
21943 op = 0x90 | unwind.fp_reg;
21944 add_unwind_opcode (op, 1);
21945 }
21946 else
21947 flush_pending_unwind ();
21948 }
21949
21950
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Pick the section-name prefix and ELF section type: index tables
     get SHT_ARM_EXIDX, unwind data sections plain SHT_PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is derived from the name of the text
     section it describes; plain ".text" maps to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22017
22018
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22187
22188
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22196 #endif /* OBJ_ELF */
22197
22198 /* Convert REGNAME to a DWARF-2 register number. */
22199
22200 int
22201 tc_arm_regname_to_dw2regnum (char *regname)
22202 {
22203 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22204 if (reg != FAIL)
22205 return reg;
22206
22207 /* PR 16694: Allow VFP registers as well. */
22208 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22209 if (reg != FAIL)
22210 return 64 + reg;
22211
22212 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22213 if (reg != FAIL)
22214 return reg + 256;
22215
22216 return -1;
22217 }
22218
22219 #ifdef TE_PE
/* Emit a SIZE-byte section-relative (O_secrel) reference to SYMBOL,
   the form PE/COFF debug information expects for offsets.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
22230 #endif
22231
22232 /* MD interface: Symbol and relocation handling. */
22233
22234 /* Return the address within the segment that a PC-relative fixup is
22235 relative to. For ARM, PC-relative fixups applied to instructions
22236 are generally relative to the location of the fixup plus 8 bytes.
22237 Thumb branches are offset by 4, and Thumb loads relative to PC
22238 require special handling. */
22239
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A same-section branch to an ARM-state function on a v5t+ CPU
	 keeps the full base rather than the bare pipeline compensation
	 (presumably because the branch will be resolved via BLX --
	 NOTE(review): confirm against md_apply_fix).  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22361
22362 static bfd_boolean flag_warn_syms = TRUE;
22363
22364 bfd_boolean
22365 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22366 {
22367 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22368 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22369 does mean that the resulting code might be very confusing to the reader.
22370 Also this warning can be triggered if the user omits an operand before
22371 an immediate address, eg:
22372
22373 LDR =foo
22374
22375 GAS treats this as an assignment of the value of the symbol foo to a
22376 symbol LDR, and so (without this code) it will not issue any kind of
22377 warning or error message.
22378
22379 Note - ARM instructions are case-insensitive but the strings in the hash
22380 table are all stored in lower case, so we must first ensure that name is
22381 lower case too. */
22382 if (flag_warn_syms && arm_ops_hsh)
22383 {
22384 char * nbuf = strdup (name);
22385 char * p;
22386
22387 for (p = nbuf; *p; p++)
22388 *p = TOLOWER (*p);
22389 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22390 {
22391 static struct hash_control * already_warned = NULL;
22392
22393 if (already_warned == NULL)
22394 already_warned = hash_new ();
22395 /* Only warn about the symbol once. To keep the code
22396 simple we let hash_insert do the lookup for us. */
22397 if (hash_insert (already_warned, name, NULL) == NULL)
22398 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22399 }
22400 else
22401 free (nbuf);
22402 }
22403
22404 return FALSE;
22405 }
22406
22407 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22408 Otherwise we have no need to default values of symbols. */
22409
22410 symbolS *
22411 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22412 {
22413 #ifdef OBJ_ELF
22414 if (name[0] == '_' && name[1] == 'G'
22415 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22416 {
22417 if (!GOT_symbol)
22418 {
22419 if (symbol_find (name))
22420 as_bad (_("GOT already in the symbol table"));
22421
22422 GOT_symbol = symbol_new (name, undefined_section,
22423 (valueT) 0, & zero_address_frag);
22424 }
22425
22426 return GOT_symbol;
22427 }
22428 #endif
22429
22430 return NULL;
22431 }
22432
22433 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22434 computed as two separate immediate values, added together. We
22435 already know that this value cannot be computed by just one ARM
22436 instruction. */
22437
22438 static unsigned int
22439 validate_immediate_twopart (unsigned int val,
22440 unsigned int * highpart)
22441 {
22442 unsigned int a;
22443 unsigned int i;
22444
22445 for (i = 0; i < 32; i += 2)
22446 if (((a = rotate_left (val, i)) & 0xff) != 0)
22447 {
22448 if (a & 0xff00)
22449 {
22450 if (a & ~ 0xffff)
22451 continue;
22452 * highpart = (a >> 8) | ((i + 24) << 7);
22453 }
22454 else if (a & 0xff0000)
22455 {
22456 if (a & 0xff000000)
22457 continue;
22458 * highpart = (a >> 16) | ((i + 16) << 7);
22459 }
22460 else
22461 {
22462 gas_assert (a & 0xff000000);
22463 * highpart = (a >> 24) | ((i + 8) << 7);
22464 }
22465
22466 return (a & 0xff) | (i << 7);
22467 }
22468
22469 return FAIL;
22470 }
22471
22472 static int
22473 validate_offset_imm (unsigned int val, int hwse)
22474 {
22475 if ((hwse && val > 255) || val > 4095)
22476 return FAIL;
22477 return val;
22478 }
22479
22480 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22481 negative immediate constant by altering the instruction. A bit of
22482 a hack really.
22483 MOV <-> MVN
22484 AND <-> BIC
22485 ADC <-> SBC
22486 by inverting the second operand, and
22487 ADD <-> SUB
22488 CMP <-> CMN
22489 by negating the second operand. */
22490
22491 static int
22492 negate_data_op (unsigned long * instruction,
22493 unsigned long value)
22494 {
22495 int op, new_inst;
22496 unsigned long negated, inverted;
22497
22498 negated = encode_arm_immediate (-value);
22499 inverted = encode_arm_immediate (~value);
22500
22501 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22502 switch (op)
22503 {
22504 /* First negates. */
22505 case OPCODE_SUB: /* ADD <-> SUB */
22506 new_inst = OPCODE_ADD;
22507 value = negated;
22508 break;
22509
22510 case OPCODE_ADD:
22511 new_inst = OPCODE_SUB;
22512 value = negated;
22513 break;
22514
22515 case OPCODE_CMP: /* CMP <-> CMN */
22516 new_inst = OPCODE_CMN;
22517 value = negated;
22518 break;
22519
22520 case OPCODE_CMN:
22521 new_inst = OPCODE_CMP;
22522 value = negated;
22523 break;
22524
22525 /* Now Inverted ops. */
22526 case OPCODE_MOV: /* MOV <-> MVN */
22527 new_inst = OPCODE_MVN;
22528 value = inverted;
22529 break;
22530
22531 case OPCODE_MVN:
22532 new_inst = OPCODE_MOV;
22533 value = inverted;
22534 break;
22535
22536 case OPCODE_AND: /* AND <-> BIC */
22537 new_inst = OPCODE_BIC;
22538 value = inverted;
22539 break;
22540
22541 case OPCODE_BIC:
22542 new_inst = OPCODE_AND;
22543 value = inverted;
22544 break;
22545
22546 case OPCODE_ADC: /* ADC <-> SBC */
22547 new_inst = OPCODE_SBC;
22548 value = inverted;
22549 break;
22550
22551 case OPCODE_SBC:
22552 new_inst = OPCODE_ADC;
22553 value = inverted;
22554 break;
22555
22556 /* We cannot do anything. */
22557 default:
22558 return FAIL;
22559 }
22560
22561 if (value == (unsigned) FAIL)
22562 return FAIL;
22563
22564 *instruction &= OPCODE_MASK;
22565 *instruction |= new_inst << DATA_OP_SHIFT;
22566 return value;
22567 }
22568
22569 /* Like negate_data_op, but for Thumb-2. */
22570
22571 static unsigned int
22572 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22573 {
22574 int op, new_inst;
22575 int rd;
22576 unsigned int negated, inverted;
22577
22578 negated = encode_thumb32_immediate (-value);
22579 inverted = encode_thumb32_immediate (~value);
22580
22581 rd = (*instruction >> 8) & 0xf;
22582 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22583 switch (op)
22584 {
22585 /* ADD <-> SUB. Includes CMP <-> CMN. */
22586 case T2_OPCODE_SUB:
22587 new_inst = T2_OPCODE_ADD;
22588 value = negated;
22589 break;
22590
22591 case T2_OPCODE_ADD:
22592 new_inst = T2_OPCODE_SUB;
22593 value = negated;
22594 break;
22595
22596 /* ORR <-> ORN. Includes MOV <-> MVN. */
22597 case T2_OPCODE_ORR:
22598 new_inst = T2_OPCODE_ORN;
22599 value = inverted;
22600 break;
22601
22602 case T2_OPCODE_ORN:
22603 new_inst = T2_OPCODE_ORR;
22604 value = inverted;
22605 break;
22606
22607 /* AND <-> BIC. TST has no inverted equivalent. */
22608 case T2_OPCODE_AND:
22609 new_inst = T2_OPCODE_BIC;
22610 if (rd == 15)
22611 value = FAIL;
22612 else
22613 value = inverted;
22614 break;
22615
22616 case T2_OPCODE_BIC:
22617 new_inst = T2_OPCODE_AND;
22618 value = inverted;
22619 break;
22620
22621 /* ADC <-> SBC */
22622 case T2_OPCODE_ADC:
22623 new_inst = T2_OPCODE_SBC;
22624 value = inverted;
22625 break;
22626
22627 case T2_OPCODE_SBC:
22628 new_inst = T2_OPCODE_ADC;
22629 value = inverted;
22630 break;
22631
22632 /* We cannot do anything. */
22633 default:
22634 return FAIL;
22635 }
22636
22637 if (value == (unsigned int)FAIL)
22638 return FAIL;
22639
22640 *instruction &= T2_OPCODE_MASK;
22641 *instruction |= new_inst << T2_DATA_OP_SHIFT;
22642 return value;
22643 }
22644
22645 /* Read a 32-bit thumb instruction from buf. */
22646 static unsigned long
22647 get_thumb32_insn (char * buf)
22648 {
22649 unsigned long insn;
22650 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22651 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22652
22653 return insn;
22654 }
22655
22656
22657 /* We usually want to set the low bit on the address of thumb function
22658 symbols. In particular .word foo - . should have the low bit set.
22659 Generic code tries to fold the difference of two symbols to
22660 a constant. Prevent this and force a relocation when the first symbols
22661 is a thumb function. */
22662
22663 bfd_boolean
22664 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22665 {
22666 if (op == O_subtract
22667 && l->X_op == O_symbol
22668 && r->X_op == O_symbol
22669 && THUMB_IS_FUNC (l->X_add_symbol))
22670 {
22671 l->X_op = O_subtract;
22672 l->X_op_symbol = r->X_add_symbol;
22673 l->X_add_number -= r->X_add_number;
22674 return TRUE;
22675 }
22676
22677 /* Process as normal. */
22678 return FALSE;
22679 }
22680
22681 /* Encode Thumb2 unconditional branches and calls. The encoding
22682 for the 2 are identical for the immediate values. */
22683
22684 static void
22685 encode_thumb2_b_bl_offset (char * buf, offsetT value)
22686 {
22687 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22688 offsetT newval;
22689 offsetT newval2;
22690 addressT S, I1, I2, lo, hi;
22691
22692 S = (value >> 24) & 0x01;
22693 I1 = (value >> 23) & 0x01;
22694 I2 = (value >> 22) & 0x01;
22695 hi = (value >> 12) & 0x3ff;
22696 lo = (value >> 1) & 0x7ff;
22697 newval = md_chars_to_number (buf, THUMB_SIZE);
22698 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22699 newval |= (S << 10) | hi;
22700 newval2 &= ~T2I1I2MASK;
22701 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22702 md_number_to_chars (buf, newval, THUMB_SIZE);
22703 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22704 }
22705
22706 void
22707 md_apply_fix (fixS * fixP,
22708 valueT * valP,
22709 segT seg)
22710 {
22711 offsetT value = * valP;
22712 offsetT newval;
22713 unsigned int newimm;
22714 unsigned long temp;
22715 int sign;
22716 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22717
22718 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22719
22720 /* Note whether this will delete the relocation. */
22721
22722 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22723 fixP->fx_done = 1;
22724
22725 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22726 consistency with the behaviour on 32-bit hosts. Remember value
22727 for emit_reloc. */
22728 value &= 0xffffffff;
22729 value ^= 0x80000000;
22730 value -= 0x80000000;
22731
22732 *valP = value;
22733 fixP->fx_addnumber = value;
22734
22735 /* Same treatment for fixP->fx_offset. */
22736 fixP->fx_offset &= 0xffffffff;
22737 fixP->fx_offset ^= 0x80000000;
22738 fixP->fx_offset -= 0x80000000;
22739
22740 switch (fixP->fx_r_type)
22741 {
22742 case BFD_RELOC_NONE:
22743 /* This will need to go in the object file. */
22744 fixP->fx_done = 0;
22745 break;
22746
22747 case BFD_RELOC_ARM_IMMEDIATE:
22748 /* We claim that this fixup has been processed here,
22749 even if in fact we generate an error because we do
22750 not have a reloc for it, so tc_gen_reloc will reject it. */
22751 fixP->fx_done = 1;
22752
22753 if (fixP->fx_addsy)
22754 {
22755 const char *msg = 0;
22756
22757 if (! S_IS_DEFINED (fixP->fx_addsy))
22758 msg = _("undefined symbol %s used as an immediate value");
22759 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22760 msg = _("symbol %s is in a different section");
22761 else if (S_IS_WEAK (fixP->fx_addsy))
22762 msg = _("symbol %s is weak and may be overridden later");
22763
22764 if (msg)
22765 {
22766 as_bad_where (fixP->fx_file, fixP->fx_line,
22767 msg, S_GET_NAME (fixP->fx_addsy));
22768 break;
22769 }
22770 }
22771
22772 temp = md_chars_to_number (buf, INSN_SIZE);
22773
22774 /* If the offset is negative, we should use encoding A2 for ADR. */
22775 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22776 newimm = negate_data_op (&temp, value);
22777 else
22778 {
22779 newimm = encode_arm_immediate (value);
22780
22781 /* If the instruction will fail, see if we can fix things up by
22782 changing the opcode. */
22783 if (newimm == (unsigned int) FAIL)
22784 newimm = negate_data_op (&temp, value);
22785 /* MOV accepts both ARM modified immediate (A1 encoding) and
22786 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22787 When disassembling, MOV is preferred when there is no encoding
22788 overlap. */
22789 if (newimm == (unsigned int) FAIL
22790 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
22791 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
22792 && !((temp >> SBIT_SHIFT) & 0x1)
22793 && value >= 0 && value <= 0xffff)
22794 {
22795 /* Clear bits[23:20] to change encoding from A1 to A2. */
22796 temp &= 0xff0fffff;
22797 /* Encoding high 4bits imm. Code below will encode the remaining
22798 low 12bits. */
22799 temp |= (value & 0x0000f000) << 4;
22800 newimm = value & 0x00000fff;
22801 }
22802 }
22803
22804 if (newimm == (unsigned int) FAIL)
22805 {
22806 as_bad_where (fixP->fx_file, fixP->fx_line,
22807 _("invalid constant (%lx) after fixup"),
22808 (unsigned long) value);
22809 break;
22810 }
22811
22812 newimm |= (temp & 0xfffff000);
22813 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22814 break;
22815
22816 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22817 {
22818 unsigned int highpart = 0;
22819 unsigned int newinsn = 0xe1a00000; /* nop. */
22820
22821 if (fixP->fx_addsy)
22822 {
22823 const char *msg = 0;
22824
22825 if (! S_IS_DEFINED (fixP->fx_addsy))
22826 msg = _("undefined symbol %s used as an immediate value");
22827 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22828 msg = _("symbol %s is in a different section");
22829 else if (S_IS_WEAK (fixP->fx_addsy))
22830 msg = _("symbol %s is weak and may be overridden later");
22831
22832 if (msg)
22833 {
22834 as_bad_where (fixP->fx_file, fixP->fx_line,
22835 msg, S_GET_NAME (fixP->fx_addsy));
22836 break;
22837 }
22838 }
22839
22840 newimm = encode_arm_immediate (value);
22841 temp = md_chars_to_number (buf, INSN_SIZE);
22842
22843 /* If the instruction will fail, see if we can fix things up by
22844 changing the opcode. */
22845 if (newimm == (unsigned int) FAIL
22846 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22847 {
22848 /* No ? OK - try using two ADD instructions to generate
22849 the value. */
22850 newimm = validate_immediate_twopart (value, & highpart);
22851
22852 /* Yes - then make sure that the second instruction is
22853 also an add. */
22854 if (newimm != (unsigned int) FAIL)
22855 newinsn = temp;
22856 /* Still No ? Try using a negated value. */
22857 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22858 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22859 /* Otherwise - give up. */
22860 else
22861 {
22862 as_bad_where (fixP->fx_file, fixP->fx_line,
22863 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22864 (long) value);
22865 break;
22866 }
22867
22868 /* Replace the first operand in the 2nd instruction (which
22869 is the PC) with the destination register. We have
22870 already added in the PC in the first instruction and we
22871 do not want to do it again. */
22872 newinsn &= ~ 0xf0000;
22873 newinsn |= ((newinsn & 0x0f000) << 4);
22874 }
22875
22876 newimm |= (temp & 0xfffff000);
22877 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22878
22879 highpart |= (newinsn & 0xfffff000);
22880 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22881 }
22882 break;
22883
22884 case BFD_RELOC_ARM_OFFSET_IMM:
22885 if (!fixP->fx_done && seg->use_rela_p)
22886 value = 0;
22887 /* Fall through. */
22888
22889 case BFD_RELOC_ARM_LITERAL:
22890 sign = value > 0;
22891
22892 if (value < 0)
22893 value = - value;
22894
22895 if (validate_offset_imm (value, 0) == FAIL)
22896 {
22897 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22898 as_bad_where (fixP->fx_file, fixP->fx_line,
22899 _("invalid literal constant: pool needs to be closer"));
22900 else
22901 as_bad_where (fixP->fx_file, fixP->fx_line,
22902 _("bad immediate value for offset (%ld)"),
22903 (long) value);
22904 break;
22905 }
22906
22907 newval = md_chars_to_number (buf, INSN_SIZE);
22908 if (value == 0)
22909 newval &= 0xfffff000;
22910 else
22911 {
22912 newval &= 0xff7ff000;
22913 newval |= value | (sign ? INDEX_UP : 0);
22914 }
22915 md_number_to_chars (buf, newval, INSN_SIZE);
22916 break;
22917
22918 case BFD_RELOC_ARM_OFFSET_IMM8:
22919 case BFD_RELOC_ARM_HWLITERAL:
22920 sign = value > 0;
22921
22922 if (value < 0)
22923 value = - value;
22924
22925 if (validate_offset_imm (value, 1) == FAIL)
22926 {
22927 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22928 as_bad_where (fixP->fx_file, fixP->fx_line,
22929 _("invalid literal constant: pool needs to be closer"));
22930 else
22931 as_bad_where (fixP->fx_file, fixP->fx_line,
22932 _("bad immediate value for 8-bit offset (%ld)"),
22933 (long) value);
22934 break;
22935 }
22936
22937 newval = md_chars_to_number (buf, INSN_SIZE);
22938 if (value == 0)
22939 newval &= 0xfffff0f0;
22940 else
22941 {
22942 newval &= 0xff7ff0f0;
22943 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22944 }
22945 md_number_to_chars (buf, newval, INSN_SIZE);
22946 break;
22947
22948 case BFD_RELOC_ARM_T32_OFFSET_U8:
22949 if (value < 0 || value > 1020 || value % 4 != 0)
22950 as_bad_where (fixP->fx_file, fixP->fx_line,
22951 _("bad immediate value for offset (%ld)"), (long) value);
22952 value /= 4;
22953
22954 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22955 newval |= value;
22956 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22957 break;
22958
22959 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22960 /* This is a complicated relocation used for all varieties of Thumb32
22961 load/store instruction with immediate offset:
22962
22963 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22964 *4, optional writeback(W)
22965 (doubleword load/store)
22966
22967 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22968 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22969 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22970 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22971 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22972
22973 Uppercase letters indicate bits that are already encoded at
22974 this point. Lowercase letters are our problem. For the
22975 second block of instructions, the secondary opcode nybble
22976 (bits 8..11) is present, and bit 23 is zero, even if this is
22977 a PC-relative operation. */
22978 newval = md_chars_to_number (buf, THUMB_SIZE);
22979 newval <<= 16;
22980 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22981
22982 if ((newval & 0xf0000000) == 0xe0000000)
22983 {
22984 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22985 if (value >= 0)
22986 newval |= (1 << 23);
22987 else
22988 value = -value;
22989 if (value % 4 != 0)
22990 {
22991 as_bad_where (fixP->fx_file, fixP->fx_line,
22992 _("offset not a multiple of 4"));
22993 break;
22994 }
22995 value /= 4;
22996 if (value > 0xff)
22997 {
22998 as_bad_where (fixP->fx_file, fixP->fx_line,
22999 _("offset out of range"));
23000 break;
23001 }
23002 newval &= ~0xff;
23003 }
23004 else if ((newval & 0x000f0000) == 0x000f0000)
23005 {
23006 /* PC-relative, 12-bit offset. */
23007 if (value >= 0)
23008 newval |= (1 << 23);
23009 else
23010 value = -value;
23011 if (value > 0xfff)
23012 {
23013 as_bad_where (fixP->fx_file, fixP->fx_line,
23014 _("offset out of range"));
23015 break;
23016 }
23017 newval &= ~0xfff;
23018 }
23019 else if ((newval & 0x00000100) == 0x00000100)
23020 {
23021 /* Writeback: 8-bit, +/- offset. */
23022 if (value >= 0)
23023 newval |= (1 << 9);
23024 else
23025 value = -value;
23026 if (value > 0xff)
23027 {
23028 as_bad_where (fixP->fx_file, fixP->fx_line,
23029 _("offset out of range"));
23030 break;
23031 }
23032 newval &= ~0xff;
23033 }
23034 else if ((newval & 0x00000f00) == 0x00000e00)
23035 {
23036 /* T-instruction: positive 8-bit offset. */
23037 if (value < 0 || value > 0xff)
23038 {
23039 as_bad_where (fixP->fx_file, fixP->fx_line,
23040 _("offset out of range"));
23041 break;
23042 }
23043 newval &= ~0xff;
23044 newval |= value;
23045 }
23046 else
23047 {
23048 /* Positive 12-bit or negative 8-bit offset. */
23049 int limit;
23050 if (value >= 0)
23051 {
23052 newval |= (1 << 23);
23053 limit = 0xfff;
23054 }
23055 else
23056 {
23057 value = -value;
23058 limit = 0xff;
23059 }
23060 if (value > limit)
23061 {
23062 as_bad_where (fixP->fx_file, fixP->fx_line,
23063 _("offset out of range"));
23064 break;
23065 }
23066 newval &= ~limit;
23067 }
23068
23069 newval |= value;
23070 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23071 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23072 break;
23073
23074 case BFD_RELOC_ARM_SHIFT_IMM:
23075 newval = md_chars_to_number (buf, INSN_SIZE);
23076 if (((unsigned long) value) > 32
23077 || (value == 32
23078 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23079 {
23080 as_bad_where (fixP->fx_file, fixP->fx_line,
23081 _("shift expression is too large"));
23082 break;
23083 }
23084
23085 if (value == 0)
23086 /* Shifts of zero must be done as lsl. */
23087 newval &= ~0x60;
23088 else if (value == 32)
23089 value = 0;
23090 newval &= 0xfffff07f;
23091 newval |= (value & 0x1f) << 7;
23092 md_number_to_chars (buf, newval, INSN_SIZE);
23093 break;
23094
23095 case BFD_RELOC_ARM_T32_IMMEDIATE:
23096 case BFD_RELOC_ARM_T32_ADD_IMM:
23097 case BFD_RELOC_ARM_T32_IMM12:
23098 case BFD_RELOC_ARM_T32_ADD_PC12:
23099 /* We claim that this fixup has been processed here,
23100 even if in fact we generate an error because we do
23101 not have a reloc for it, so tc_gen_reloc will reject it. */
23102 fixP->fx_done = 1;
23103
23104 if (fixP->fx_addsy
23105 && ! S_IS_DEFINED (fixP->fx_addsy))
23106 {
23107 as_bad_where (fixP->fx_file, fixP->fx_line,
23108 _("undefined symbol %s used as an immediate value"),
23109 S_GET_NAME (fixP->fx_addsy));
23110 break;
23111 }
23112
23113 newval = md_chars_to_number (buf, THUMB_SIZE);
23114 newval <<= 16;
23115 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23116
23117 newimm = FAIL;
23118 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23119 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23120 Thumb2 modified immediate encoding (T2). */
23121 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23122 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23123 {
23124 newimm = encode_thumb32_immediate (value);
23125 if (newimm == (unsigned int) FAIL)
23126 newimm = thumb32_negate_data_op (&newval, value);
23127 }
23128 if (newimm == (unsigned int) FAIL)
23129 {
23130 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23131 {
	      /* Turn add/sub into addw/subw.  */
23133 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23134 newval = (newval & 0xfeffffff) | 0x02000000;
23135 /* No flat 12-bit imm encoding for addsw/subsw. */
23136 if ((newval & 0x00100000) == 0)
23137 {
23138 /* 12 bit immediate for addw/subw. */
23139 if (value < 0)
23140 {
23141 value = -value;
23142 newval ^= 0x00a00000;
23143 }
23144 if (value > 0xfff)
23145 newimm = (unsigned int) FAIL;
23146 else
23147 newimm = value;
23148 }
23149 }
23150 else
23151 {
23152 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23153 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23154 disassembling, MOV is preferred when there is no encoding
23155 overlap.
23156 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23157 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23158 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23159 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23160 && value >= 0 && value <=0xffff)
23161 {
23162 /* Toggle bit[25] to change encoding from T2 to T3. */
23163 newval ^= 1 << 25;
23164 /* Clear bits[19:16]. */
23165 newval &= 0xfff0ffff;
23166 /* Encoding high 4bits imm. Code below will encode the
23167 remaining low 12bits. */
23168 newval |= (value & 0x0000f000) << 4;
23169 newimm = value & 0x00000fff;
23170 }
23171 }
23172 }
23173
23174 if (newimm == (unsigned int)FAIL)
23175 {
23176 as_bad_where (fixP->fx_file, fixP->fx_line,
23177 _("invalid constant (%lx) after fixup"),
23178 (unsigned long) value);
23179 break;
23180 }
23181
23182 newval |= (newimm & 0x800) << 15;
23183 newval |= (newimm & 0x700) << 4;
23184 newval |= (newimm & 0x0ff);
23185
23186 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23187 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23188 break;
23189
23190 case BFD_RELOC_ARM_SMC:
23191 if (((unsigned long) value) > 0xffff)
23192 as_bad_where (fixP->fx_file, fixP->fx_line,
23193 _("invalid smc expression"));
23194 newval = md_chars_to_number (buf, INSN_SIZE);
23195 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23196 md_number_to_chars (buf, newval, INSN_SIZE);
23197 break;
23198
23199 case BFD_RELOC_ARM_HVC:
23200 if (((unsigned long) value) > 0xffff)
23201 as_bad_where (fixP->fx_file, fixP->fx_line,
23202 _("invalid hvc expression"));
23203 newval = md_chars_to_number (buf, INSN_SIZE);
23204 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23205 md_number_to_chars (buf, newval, INSN_SIZE);
23206 break;
23207
23208 case BFD_RELOC_ARM_SWI:
23209 if (fixP->tc_fix_data != 0)
23210 {
23211 if (((unsigned long) value) > 0xff)
23212 as_bad_where (fixP->fx_file, fixP->fx_line,
23213 _("invalid swi expression"));
23214 newval = md_chars_to_number (buf, THUMB_SIZE);
23215 newval |= value;
23216 md_number_to_chars (buf, newval, THUMB_SIZE);
23217 }
23218 else
23219 {
23220 if (((unsigned long) value) > 0x00ffffff)
23221 as_bad_where (fixP->fx_file, fixP->fx_line,
23222 _("invalid swi expression"));
23223 newval = md_chars_to_number (buf, INSN_SIZE);
23224 newval |= value;
23225 md_number_to_chars (buf, newval, INSN_SIZE);
23226 }
23227 break;
23228
23229 case BFD_RELOC_ARM_MULTI:
23230 if (((unsigned long) value) > 0xffff)
23231 as_bad_where (fixP->fx_file, fixP->fx_line,
23232 _("invalid expression in load/store multiple"));
23233 newval = value | md_chars_to_number (buf, INSN_SIZE);
23234 md_number_to_chars (buf, newval, INSN_SIZE);
23235 break;
23236
23237 #ifdef OBJ_ELF
23238 case BFD_RELOC_ARM_PCREL_CALL:
23239
23240 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23241 && fixP->fx_addsy
23242 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23243 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23244 && THUMB_IS_FUNC (fixP->fx_addsy))
23245 /* Flip the bl to blx. This is a simple flip
23246 bit here because we generate PCREL_CALL for
23247 unconditional bls. */
23248 {
23249 newval = md_chars_to_number (buf, INSN_SIZE);
23250 newval = newval | 0x10000000;
23251 md_number_to_chars (buf, newval, INSN_SIZE);
23252 temp = 1;
23253 fixP->fx_done = 1;
23254 }
23255 else
23256 temp = 3;
23257 goto arm_branch_common;
23258
23259 case BFD_RELOC_ARM_PCREL_JUMP:
23260 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23261 && fixP->fx_addsy
23262 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23263 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23264 && THUMB_IS_FUNC (fixP->fx_addsy))
23265 {
23266 /* This would map to a bl<cond>, b<cond>,
23267 b<always> to a Thumb function. We
23268 need to force a relocation for this particular
23269 case. */
23270 newval = md_chars_to_number (buf, INSN_SIZE);
23271 fixP->fx_done = 0;
23272 }
23273 /* Fall through. */
23274
23275 case BFD_RELOC_ARM_PLT32:
23276 #endif
23277 case BFD_RELOC_ARM_PCREL_BRANCH:
23278 temp = 3;
23279 goto arm_branch_common;
23280
23281 case BFD_RELOC_ARM_PCREL_BLX:
23282
23283 temp = 1;
23284 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23285 && fixP->fx_addsy
23286 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23287 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23288 && ARM_IS_FUNC (fixP->fx_addsy))
23289 {
23290 /* Flip the blx to a bl and warn. */
23291 const char *name = S_GET_NAME (fixP->fx_addsy);
23292 newval = 0xeb000000;
23293 as_warn_where (fixP->fx_file, fixP->fx_line,
23294 _("blx to '%s' an ARM ISA state function changed to bl"),
23295 name);
23296 md_number_to_chars (buf, newval, INSN_SIZE);
23297 temp = 3;
23298 fixP->fx_done = 1;
23299 }
23300
23301 #ifdef OBJ_ELF
23302 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23303 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23304 #endif
23305
23306 arm_branch_common:
23307 /* We are going to store value (shifted right by two) in the
23308 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23309 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
	 also be clear.  */
23311 if (value & temp)
23312 as_bad_where (fixP->fx_file, fixP->fx_line,
23313 _("misaligned branch destination"));
23314 if ((value & (offsetT)0xfe000000) != (offsetT)0
23315 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23316 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23317
23318 if (fixP->fx_done || !seg->use_rela_p)
23319 {
23320 newval = md_chars_to_number (buf, INSN_SIZE);
23321 newval |= (value >> 2) & 0x00ffffff;
23322 /* Set the H bit on BLX instructions. */
23323 if (temp == 1)
23324 {
23325 if (value & 2)
23326 newval |= 0x01000000;
23327 else
23328 newval &= ~0x01000000;
23329 }
23330 md_number_to_chars (buf, newval, INSN_SIZE);
23331 }
23332 break;
23333
23334 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23335 /* CBZ can only branch forward. */
23336
23337 /* Attempts to use CBZ to branch to the next instruction
23338 (which, strictly speaking, are prohibited) will be turned into
23339 no-ops.
23340
23341 FIXME: It may be better to remove the instruction completely and
23342 perform relaxation. */
23343 if (value == -2)
23344 {
23345 newval = md_chars_to_number (buf, THUMB_SIZE);
23346 newval = 0xbf00; /* NOP encoding T1 */
23347 md_number_to_chars (buf, newval, THUMB_SIZE);
23348 }
23349 else
23350 {
23351 if (value & ~0x7e)
23352 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23353
23354 if (fixP->fx_done || !seg->use_rela_p)
23355 {
23356 newval = md_chars_to_number (buf, THUMB_SIZE);
23357 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23358 md_number_to_chars (buf, newval, THUMB_SIZE);
23359 }
23360 }
23361 break;
23362
23363 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23364 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23365 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23366
23367 if (fixP->fx_done || !seg->use_rela_p)
23368 {
23369 newval = md_chars_to_number (buf, THUMB_SIZE);
23370 newval |= (value & 0x1ff) >> 1;
23371 md_number_to_chars (buf, newval, THUMB_SIZE);
23372 }
23373 break;
23374
23375 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23376 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23377 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23378
23379 if (fixP->fx_done || !seg->use_rela_p)
23380 {
23381 newval = md_chars_to_number (buf, THUMB_SIZE);
23382 newval |= (value & 0xfff) >> 1;
23383 md_number_to_chars (buf, newval, THUMB_SIZE);
23384 }
23385 break;
23386
23387 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23388 if (fixP->fx_addsy
23389 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23390 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23391 && ARM_IS_FUNC (fixP->fx_addsy)
23392 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23393 {
23394 /* Force a relocation for a branch 20 bits wide. */
23395 fixP->fx_done = 0;
23396 }
23397 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23398 as_bad_where (fixP->fx_file, fixP->fx_line,
23399 _("conditional branch out of range"));
23400
23401 if (fixP->fx_done || !seg->use_rela_p)
23402 {
23403 offsetT newval2;
23404 addressT S, J1, J2, lo, hi;
23405
23406 S = (value & 0x00100000) >> 20;
23407 J2 = (value & 0x00080000) >> 19;
23408 J1 = (value & 0x00040000) >> 18;
23409 hi = (value & 0x0003f000) >> 12;
23410 lo = (value & 0x00000ffe) >> 1;
23411
23412 newval = md_chars_to_number (buf, THUMB_SIZE);
23413 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23414 newval |= (S << 10) | hi;
23415 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23416 md_number_to_chars (buf, newval, THUMB_SIZE);
23417 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23418 }
23419 break;
23420
23421 case BFD_RELOC_THUMB_PCREL_BLX:
23422 /* If there is a blx from a thumb state function to
23423 another thumb function flip this to a bl and warn
23424 about it. */
23425
23426 if (fixP->fx_addsy
23427 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23428 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23429 && THUMB_IS_FUNC (fixP->fx_addsy))
23430 {
23431 const char *name = S_GET_NAME (fixP->fx_addsy);
23432 as_warn_where (fixP->fx_file, fixP->fx_line,
23433 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23434 name);
23435 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23436 newval = newval | 0x1000;
23437 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23438 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23439 fixP->fx_done = 1;
23440 }
23441
23442
23443 goto thumb_bl_common;
23444
23445 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23446 /* A bl from Thumb state ISA to an internal ARM state function
23447 is converted to a blx. */
23448 if (fixP->fx_addsy
23449 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23450 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23451 && ARM_IS_FUNC (fixP->fx_addsy)
23452 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23453 {
23454 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23455 newval = newval & ~0x1000;
23456 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23457 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23458 fixP->fx_done = 1;
23459 }
23460
23461 thumb_bl_common:
23462
23463 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23464 /* For a BLX instruction, make sure that the relocation is rounded up
23465 to a word boundary. This follows the semantics of the instruction
23466 which specifies that bit 1 of the target address will come from bit
23467 1 of the base address. */
23468 value = (value + 3) & ~ 3;
23469
23470 #ifdef OBJ_ELF
23471 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23472 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23473 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23474 #endif
23475
23476 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23477 {
23478 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23479 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23480 else if ((value & ~0x1ffffff)
23481 && ((value & ~0x1ffffff) != ~0x1ffffff))
23482 as_bad_where (fixP->fx_file, fixP->fx_line,
23483 _("Thumb2 branch out of range"));
23484 }
23485
23486 if (fixP->fx_done || !seg->use_rela_p)
23487 encode_thumb2_b_bl_offset (buf, value);
23488
23489 break;
23490
23491 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23492 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23493 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23494
23495 if (fixP->fx_done || !seg->use_rela_p)
23496 encode_thumb2_b_bl_offset (buf, value);
23497
23498 break;
23499
23500 case BFD_RELOC_8:
23501 if (fixP->fx_done || !seg->use_rela_p)
23502 *buf = value;
23503 break;
23504
23505 case BFD_RELOC_16:
23506 if (fixP->fx_done || !seg->use_rela_p)
23507 md_number_to_chars (buf, value, 2);
23508 break;
23509
23510 #ifdef OBJ_ELF
23511 case BFD_RELOC_ARM_TLS_CALL:
23512 case BFD_RELOC_ARM_THM_TLS_CALL:
23513 case BFD_RELOC_ARM_TLS_DESCSEQ:
23514 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23515 case BFD_RELOC_ARM_TLS_GOTDESC:
23516 case BFD_RELOC_ARM_TLS_GD32:
23517 case BFD_RELOC_ARM_TLS_LE32:
23518 case BFD_RELOC_ARM_TLS_IE32:
23519 case BFD_RELOC_ARM_TLS_LDM32:
23520 case BFD_RELOC_ARM_TLS_LDO32:
23521 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23522 break;
23523
23524 case BFD_RELOC_ARM_GOT32:
23525 case BFD_RELOC_ARM_GOTOFF:
23526 break;
23527
23528 case BFD_RELOC_ARM_GOT_PREL:
23529 if (fixP->fx_done || !seg->use_rela_p)
23530 md_number_to_chars (buf, value, 4);
23531 break;
23532
23533 case BFD_RELOC_ARM_TARGET2:
23534 /* TARGET2 is not partial-inplace, so we need to write the
23535 addend here for REL targets, because it won't be written out
23536 during reloc processing later. */
23537 if (fixP->fx_done || !seg->use_rela_p)
23538 md_number_to_chars (buf, fixP->fx_offset, 4);
23539 break;
23540 #endif
23541
23542 case BFD_RELOC_RVA:
23543 case BFD_RELOC_32:
23544 case BFD_RELOC_ARM_TARGET1:
23545 case BFD_RELOC_ARM_ROSEGREL32:
23546 case BFD_RELOC_ARM_SBREL32:
23547 case BFD_RELOC_32_PCREL:
23548 #ifdef TE_PE
23549 case BFD_RELOC_32_SECREL:
23550 #endif
23551 if (fixP->fx_done || !seg->use_rela_p)
23552 #ifdef TE_WINCE
23553 /* For WinCE we only do this for pcrel fixups. */
23554 if (fixP->fx_done || fixP->fx_pcrel)
23555 #endif
23556 md_number_to_chars (buf, value, 4);
23557 break;
23558
23559 #ifdef OBJ_ELF
23560 case BFD_RELOC_ARM_PREL31:
23561 if (fixP->fx_done || !seg->use_rela_p)
23562 {
23563 newval = md_chars_to_number (buf, 4) & 0x80000000;
23564 if ((value ^ (value >> 1)) & 0x40000000)
23565 {
23566 as_bad_where (fixP->fx_file, fixP->fx_line,
23567 _("rel31 relocation overflow"));
23568 }
23569 newval |= value & 0x7fffffff;
23570 md_number_to_chars (buf, newval, 4);
23571 }
23572 break;
23573 #endif
23574
23575 case BFD_RELOC_ARM_CP_OFF_IMM:
23576 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23577 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23578 newval = md_chars_to_number (buf, INSN_SIZE);
23579 else
23580 newval = get_thumb32_insn (buf);
23581 if ((newval & 0x0f200f00) == 0x0d000900)
23582 {
23583 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23584 has permitted values that are multiples of 2, in the range 0
23585 to 510. */
23586 if (value < -510 || value > 510 || (value & 1))
23587 as_bad_where (fixP->fx_file, fixP->fx_line,
23588 _("co-processor offset out of range"));
23589 }
23590 else if (value < -1023 || value > 1023 || (value & 3))
23591 as_bad_where (fixP->fx_file, fixP->fx_line,
23592 _("co-processor offset out of range"));
23593 cp_off_common:
23594 sign = value > 0;
23595 if (value < 0)
23596 value = -value;
23597 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23598 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23599 newval = md_chars_to_number (buf, INSN_SIZE);
23600 else
23601 newval = get_thumb32_insn (buf);
23602 if (value == 0)
23603 newval &= 0xffffff00;
23604 else
23605 {
23606 newval &= 0xff7fff00;
23607 if ((newval & 0x0f200f00) == 0x0d000900)
23608 {
23609 /* This is a fp16 vstr/vldr.
23610
23611 It requires the immediate offset in the instruction is shifted
23612 left by 1 to be a half-word offset.
23613
23614 Here, left shift by 1 first, and later right shift by 2
23615 should get the right offset. */
23616 value <<= 1;
23617 }
23618 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23619 }
23620 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23621 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23622 md_number_to_chars (buf, newval, INSN_SIZE);
23623 else
23624 put_thumb32_insn (buf, newval);
23625 break;
23626
23627 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23628 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23629 if (value < -255 || value > 255)
23630 as_bad_where (fixP->fx_file, fixP->fx_line,
23631 _("co-processor offset out of range"));
23632 value *= 4;
23633 goto cp_off_common;
23634
23635 case BFD_RELOC_ARM_THUMB_OFFSET:
23636 newval = md_chars_to_number (buf, THUMB_SIZE);
23637 /* Exactly what ranges, and where the offset is inserted depends
23638 on the type of instruction, we can establish this from the
23639 top 4 bits. */
23640 switch (newval >> 12)
23641 {
23642 case 4: /* PC load. */
23643 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23644 forced to zero for these loads; md_pcrel_from has already
23645 compensated for this. */
23646 if (value & 3)
23647 as_bad_where (fixP->fx_file, fixP->fx_line,
23648 _("invalid offset, target not word aligned (0x%08lX)"),
23649 (((unsigned long) fixP->fx_frag->fr_address
23650 + (unsigned long) fixP->fx_where) & ~3)
23651 + (unsigned long) value);
23652
23653 if (value & ~0x3fc)
23654 as_bad_where (fixP->fx_file, fixP->fx_line,
23655 _("invalid offset, value too big (0x%08lX)"),
23656 (long) value);
23657
23658 newval |= value >> 2;
23659 break;
23660
23661 case 9: /* SP load/store. */
23662 if (value & ~0x3fc)
23663 as_bad_where (fixP->fx_file, fixP->fx_line,
23664 _("invalid offset, value too big (0x%08lX)"),
23665 (long) value);
23666 newval |= value >> 2;
23667 break;
23668
23669 case 6: /* Word load/store. */
23670 if (value & ~0x7c)
23671 as_bad_where (fixP->fx_file, fixP->fx_line,
23672 _("invalid offset, value too big (0x%08lX)"),
23673 (long) value);
23674 newval |= value << 4; /* 6 - 2. */
23675 break;
23676
23677 case 7: /* Byte load/store. */
23678 if (value & ~0x1f)
23679 as_bad_where (fixP->fx_file, fixP->fx_line,
23680 _("invalid offset, value too big (0x%08lX)"),
23681 (long) value);
23682 newval |= value << 6;
23683 break;
23684
23685 case 8: /* Halfword load/store. */
23686 if (value & ~0x3e)
23687 as_bad_where (fixP->fx_file, fixP->fx_line,
23688 _("invalid offset, value too big (0x%08lX)"),
23689 (long) value);
23690 newval |= value << 5; /* 6 - 1. */
23691 break;
23692
23693 default:
23694 as_bad_where (fixP->fx_file, fixP->fx_line,
23695 "Unable to process relocation for thumb opcode: %lx",
23696 (unsigned long) newval);
23697 break;
23698 }
23699 md_number_to_chars (buf, newval, THUMB_SIZE);
23700 break;
23701
23702 case BFD_RELOC_ARM_THUMB_ADD:
23703 /* This is a complicated relocation, since we use it for all of
23704 the following immediate relocations:
23705
23706 3bit ADD/SUB
23707 8bit ADD/SUB
23708 9bit ADD/SUB SP word-aligned
23709 10bit ADD PC/SP word-aligned
23710
23711 The type of instruction being processed is encoded in the
23712 instruction field:
23713
23714 0x8000 SUB
23715 0x00F0 Rd
23716 0x000F Rs
23717 */
23718 newval = md_chars_to_number (buf, THUMB_SIZE);
23719 {
23720 int rd = (newval >> 4) & 0xf;
23721 int rs = newval & 0xf;
23722 int subtract = !!(newval & 0x8000);
23723
23724 /* Check for HI regs, only very restricted cases allowed:
23725 Adjusting SP, and using PC or SP to get an address. */
23726 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23727 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23728 as_bad_where (fixP->fx_file, fixP->fx_line,
23729 _("invalid Hi register with immediate"));
23730
23731 /* If value is negative, choose the opposite instruction. */
23732 if (value < 0)
23733 {
23734 value = -value;
23735 subtract = !subtract;
23736 if (value < 0)
23737 as_bad_where (fixP->fx_file, fixP->fx_line,
23738 _("immediate value out of range"));
23739 }
23740
23741 if (rd == REG_SP)
23742 {
23743 if (value & ~0x1fc)
23744 as_bad_where (fixP->fx_file, fixP->fx_line,
23745 _("invalid immediate for stack address calculation"));
23746 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23747 newval |= value >> 2;
23748 }
23749 else if (rs == REG_PC || rs == REG_SP)
23750 {
23751 /* PR gas/18541. If the addition is for a defined symbol
23752 within range of an ADR instruction then accept it. */
23753 if (subtract
23754 && value == 4
23755 && fixP->fx_addsy != NULL)
23756 {
23757 subtract = 0;
23758
23759 if (! S_IS_DEFINED (fixP->fx_addsy)
23760 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23761 || S_IS_WEAK (fixP->fx_addsy))
23762 {
23763 as_bad_where (fixP->fx_file, fixP->fx_line,
23764 _("address calculation needs a strongly defined nearby symbol"));
23765 }
23766 else
23767 {
23768 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23769
23770 /* Round up to the next 4-byte boundary. */
23771 if (v & 3)
23772 v = (v + 3) & ~ 3;
23773 else
23774 v += 4;
23775 v = S_GET_VALUE (fixP->fx_addsy) - v;
23776
23777 if (v & ~0x3fc)
23778 {
23779 as_bad_where (fixP->fx_file, fixP->fx_line,
23780 _("symbol too far away"));
23781 }
23782 else
23783 {
23784 fixP->fx_done = 1;
23785 value = v;
23786 }
23787 }
23788 }
23789
23790 if (subtract || value & ~0x3fc)
23791 as_bad_where (fixP->fx_file, fixP->fx_line,
23792 _("invalid immediate for address calculation (value = 0x%08lX)"),
23793 (unsigned long) (subtract ? - value : value));
23794 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23795 newval |= rd << 8;
23796 newval |= value >> 2;
23797 }
23798 else if (rs == rd)
23799 {
23800 if (value & ~0xff)
23801 as_bad_where (fixP->fx_file, fixP->fx_line,
23802 _("immediate value out of range"));
23803 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23804 newval |= (rd << 8) | value;
23805 }
23806 else
23807 {
23808 if (value & ~0x7)
23809 as_bad_where (fixP->fx_file, fixP->fx_line,
23810 _("immediate value out of range"));
23811 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23812 newval |= rd | (rs << 3) | (value << 6);
23813 }
23814 }
23815 md_number_to_chars (buf, newval, THUMB_SIZE);
23816 break;
23817
23818 case BFD_RELOC_ARM_THUMB_IMM:
23819 newval = md_chars_to_number (buf, THUMB_SIZE);
23820 if (value < 0 || value > 255)
23821 as_bad_where (fixP->fx_file, fixP->fx_line,
23822 _("invalid immediate: %ld is out of range"),
23823 (long) value);
23824 newval |= value;
23825 md_number_to_chars (buf, newval, THUMB_SIZE);
23826 break;
23827
23828 case BFD_RELOC_ARM_THUMB_SHIFT:
23829 /* 5bit shift value (0..32). LSL cannot take 32. */
23830 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23831 temp = newval & 0xf800;
23832 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23833 as_bad_where (fixP->fx_file, fixP->fx_line,
23834 _("invalid shift value: %ld"), (long) value);
23835 /* Shifts of zero must be encoded as LSL. */
23836 if (value == 0)
23837 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23838 /* Shifts of 32 are encoded as zero. */
23839 else if (value == 32)
23840 value = 0;
23841 newval |= value << 6;
23842 md_number_to_chars (buf, newval, THUMB_SIZE);
23843 break;
23844
23845 case BFD_RELOC_VTABLE_INHERIT:
23846 case BFD_RELOC_VTABLE_ENTRY:
23847 fixP->fx_done = 0;
23848 return;
23849
23850 case BFD_RELOC_ARM_MOVW:
23851 case BFD_RELOC_ARM_MOVT:
23852 case BFD_RELOC_ARM_THUMB_MOVW:
23853 case BFD_RELOC_ARM_THUMB_MOVT:
23854 if (fixP->fx_done || !seg->use_rela_p)
23855 {
23856 /* REL format relocations are limited to a 16-bit addend. */
23857 if (!fixP->fx_done)
23858 {
23859 if (value < -0x8000 || value > 0x7fff)
23860 as_bad_where (fixP->fx_file, fixP->fx_line,
23861 _("offset out of range"));
23862 }
23863 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23864 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23865 {
23866 value >>= 16;
23867 }
23868
23869 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23870 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23871 {
23872 newval = get_thumb32_insn (buf);
23873 newval &= 0xfbf08f00;
23874 newval |= (value & 0xf000) << 4;
23875 newval |= (value & 0x0800) << 15;
23876 newval |= (value & 0x0700) << 4;
23877 newval |= (value & 0x00ff);
23878 put_thumb32_insn (buf, newval);
23879 }
23880 else
23881 {
23882 newval = md_chars_to_number (buf, 4);
23883 newval &= 0xfff0f000;
23884 newval |= value & 0x0fff;
23885 newval |= (value & 0xf000) << 4;
23886 md_number_to_chars (buf, newval, 4);
23887 }
23888 }
23889 return;
23890
23891 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23892 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23893 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23894 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23895 gas_assert (!fixP->fx_done);
23896 {
23897 bfd_vma insn;
23898 bfd_boolean is_mov;
23899 bfd_vma encoded_addend = value;
23900
23901 /* Check that addend can be encoded in instruction. */
23902 if (!seg->use_rela_p && (value < 0 || value > 255))
23903 as_bad_where (fixP->fx_file, fixP->fx_line,
23904 _("the offset 0x%08lX is not representable"),
23905 (unsigned long) encoded_addend);
23906
23907 /* Extract the instruction. */
23908 insn = md_chars_to_number (buf, THUMB_SIZE);
23909 is_mov = (insn & 0xf800) == 0x2000;
23910
23911 /* Encode insn. */
23912 if (is_mov)
23913 {
23914 if (!seg->use_rela_p)
23915 insn |= encoded_addend;
23916 }
23917 else
23918 {
23919 int rd, rs;
23920
23921 /* Extract the instruction. */
23922 /* Encoding is the following
23923 0x8000 SUB
23924 0x00F0 Rd
23925 0x000F Rs
23926 */
23927 /* The following conditions must be true :
23928 - ADD
23929 - Rd == Rs
23930 - Rd <= 7
23931 */
23932 rd = (insn >> 4) & 0xf;
23933 rs = insn & 0xf;
23934 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23935 as_bad_where (fixP->fx_file, fixP->fx_line,
23936 _("Unable to process relocation for thumb opcode: %lx"),
23937 (unsigned long) insn);
23938
23939 /* Encode as ADD immediate8 thumb 1 code. */
23940 insn = 0x3000 | (rd << 8);
23941
23942 /* Place the encoded addend into the first 8 bits of the
23943 instruction. */
23944 if (!seg->use_rela_p)
23945 insn |= encoded_addend;
23946 }
23947
23948 /* Update the instruction. */
23949 md_number_to_chars (buf, insn, THUMB_SIZE);
23950 }
23951 break;
23952
23953 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23954 case BFD_RELOC_ARM_ALU_PC_G0:
23955 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23956 case BFD_RELOC_ARM_ALU_PC_G1:
23957 case BFD_RELOC_ARM_ALU_PC_G2:
23958 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23959 case BFD_RELOC_ARM_ALU_SB_G0:
23960 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23961 case BFD_RELOC_ARM_ALU_SB_G1:
23962 case BFD_RELOC_ARM_ALU_SB_G2:
23963 gas_assert (!fixP->fx_done);
23964 if (!seg->use_rela_p)
23965 {
23966 bfd_vma insn;
23967 bfd_vma encoded_addend;
23968 bfd_vma addend_abs = abs (value);
23969
23970 /* Check that the absolute value of the addend can be
23971 expressed as an 8-bit constant plus a rotation. */
23972 encoded_addend = encode_arm_immediate (addend_abs);
23973 if (encoded_addend == (unsigned int) FAIL)
23974 as_bad_where (fixP->fx_file, fixP->fx_line,
23975 _("the offset 0x%08lX is not representable"),
23976 (unsigned long) addend_abs);
23977
23978 /* Extract the instruction. */
23979 insn = md_chars_to_number (buf, INSN_SIZE);
23980
23981 /* If the addend is positive, use an ADD instruction.
23982 Otherwise use a SUB. Take care not to destroy the S bit. */
23983 insn &= 0xff1fffff;
23984 if (value < 0)
23985 insn |= 1 << 22;
23986 else
23987 insn |= 1 << 23;
23988
23989 /* Place the encoded addend into the first 12 bits of the
23990 instruction. */
23991 insn &= 0xfffff000;
23992 insn |= encoded_addend;
23993
23994 /* Update the instruction. */
23995 md_number_to_chars (buf, insn, INSN_SIZE);
23996 }
23997 break;
23998
23999 case BFD_RELOC_ARM_LDR_PC_G0:
24000 case BFD_RELOC_ARM_LDR_PC_G1:
24001 case BFD_RELOC_ARM_LDR_PC_G2:
24002 case BFD_RELOC_ARM_LDR_SB_G0:
24003 case BFD_RELOC_ARM_LDR_SB_G1:
24004 case BFD_RELOC_ARM_LDR_SB_G2:
24005 gas_assert (!fixP->fx_done);
24006 if (!seg->use_rela_p)
24007 {
24008 bfd_vma insn;
24009 bfd_vma addend_abs = abs (value);
24010
24011 /* Check that the absolute value of the addend can be
24012 encoded in 12 bits. */
24013 if (addend_abs >= 0x1000)
24014 as_bad_where (fixP->fx_file, fixP->fx_line,
24015 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24016 (unsigned long) addend_abs);
24017
24018 /* Extract the instruction. */
24019 insn = md_chars_to_number (buf, INSN_SIZE);
24020
24021 /* If the addend is negative, clear bit 23 of the instruction.
24022 Otherwise set it. */
24023 if (value < 0)
24024 insn &= ~(1 << 23);
24025 else
24026 insn |= 1 << 23;
24027
24028 /* Place the absolute value of the addend into the first 12 bits
24029 of the instruction. */
24030 insn &= 0xfffff000;
24031 insn |= addend_abs;
24032
24033 /* Update the instruction. */
24034 md_number_to_chars (buf, insn, INSN_SIZE);
24035 }
24036 break;
24037
24038 case BFD_RELOC_ARM_LDRS_PC_G0:
24039 case BFD_RELOC_ARM_LDRS_PC_G1:
24040 case BFD_RELOC_ARM_LDRS_PC_G2:
24041 case BFD_RELOC_ARM_LDRS_SB_G0:
24042 case BFD_RELOC_ARM_LDRS_SB_G1:
24043 case BFD_RELOC_ARM_LDRS_SB_G2:
24044 gas_assert (!fixP->fx_done);
24045 if (!seg->use_rela_p)
24046 {
24047 bfd_vma insn;
24048 bfd_vma addend_abs = abs (value);
24049
24050 /* Check that the absolute value of the addend can be
24051 encoded in 8 bits. */
24052 if (addend_abs >= 0x100)
24053 as_bad_where (fixP->fx_file, fixP->fx_line,
24054 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24055 (unsigned long) addend_abs);
24056
24057 /* Extract the instruction. */
24058 insn = md_chars_to_number (buf, INSN_SIZE);
24059
24060 /* If the addend is negative, clear bit 23 of the instruction.
24061 Otherwise set it. */
24062 if (value < 0)
24063 insn &= ~(1 << 23);
24064 else
24065 insn |= 1 << 23;
24066
24067 /* Place the first four bits of the absolute value of the addend
24068 into the first 4 bits of the instruction, and the remaining
24069 four into bits 8 .. 11. */
24070 insn &= 0xfffff0f0;
24071 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24072
24073 /* Update the instruction. */
24074 md_number_to_chars (buf, insn, INSN_SIZE);
24075 }
24076 break;
24077
24078 case BFD_RELOC_ARM_LDC_PC_G0:
24079 case BFD_RELOC_ARM_LDC_PC_G1:
24080 case BFD_RELOC_ARM_LDC_PC_G2:
24081 case BFD_RELOC_ARM_LDC_SB_G0:
24082 case BFD_RELOC_ARM_LDC_SB_G1:
24083 case BFD_RELOC_ARM_LDC_SB_G2:
24084 gas_assert (!fixP->fx_done);
24085 if (!seg->use_rela_p)
24086 {
24087 bfd_vma insn;
24088 bfd_vma addend_abs = abs (value);
24089
24090 /* Check that the absolute value of the addend is a multiple of
24091 four and, when divided by four, fits in 8 bits. */
24092 if (addend_abs & 0x3)
24093 as_bad_where (fixP->fx_file, fixP->fx_line,
24094 _("bad offset 0x%08lX (must be word-aligned)"),
24095 (unsigned long) addend_abs);
24096
24097 if ((addend_abs >> 2) > 0xff)
24098 as_bad_where (fixP->fx_file, fixP->fx_line,
24099 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24100 (unsigned long) addend_abs);
24101
24102 /* Extract the instruction. */
24103 insn = md_chars_to_number (buf, INSN_SIZE);
24104
24105 /* If the addend is negative, clear bit 23 of the instruction.
24106 Otherwise set it. */
24107 if (value < 0)
24108 insn &= ~(1 << 23);
24109 else
24110 insn |= 1 << 23;
24111
24112 /* Place the addend (divided by four) into the first eight
24113 bits of the instruction. */
24114 insn &= 0xfffffff0;
24115 insn |= addend_abs >> 2;
24116
24117 /* Update the instruction. */
24118 md_number_to_chars (buf, insn, INSN_SIZE);
24119 }
24120 break;
24121
24122 case BFD_RELOC_ARM_V4BX:
24123 /* This will need to go in the object file. */
24124 fixP->fx_done = 0;
24125 break;
24126
24127 case BFD_RELOC_UNUSED:
24128 default:
24129 as_bad_where (fixP->fx_file, fixP->fx_line,
24130 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24131 }
24132 }
24133
24134 /* Translate internal representation of relocation info to BFD target
24135 format. */
24136
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* Allocate the BFD relocation and its single-entry symbol array.  */
  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend handling differs: on RELA targets
     the PC bias is folded into the addend here, while on REL targets the
     reloc's own address is recorded instead (the addend proper lives in
     the section contents).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code to emit.
     The data relocs and MOVW/MOVT pairs select a _PCREL variant when the
     fixup is PC-relative, otherwise they fall through into the plain
     pass-through list below.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types need no conversion; the internal code is
       also the BFD code.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* For EABI version 4 and above a Thumb BLX is emitted as a
	 BRANCH23 relocation instead.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* On RELA targets an OFFSET_IMM can be emitted as-is; otherwise it
	 should already have been resolved, so reaching here is an error.
	 Report an undefined local label specially.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      /* The remaining fixup types are purely internal and have no
	 object-file representation; name the type in the diagnostic.  */
      {
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ is turned into a GOTPC
     relocation, with the reloc's own address as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24403
24404 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24405
24406 void
24407 cons_fix_new_arm (fragS * frag,
24408 int where,
24409 int size,
24410 expressionS * exp,
24411 bfd_reloc_code_real_type reloc)
24412 {
24413 int pcrel = 0;
24414
24415 /* Pick a reloc.
24416 FIXME: @@ Should look at CPU word size. */
24417 switch (size)
24418 {
24419 case 1:
24420 reloc = BFD_RELOC_8;
24421 break;
24422 case 2:
24423 reloc = BFD_RELOC_16;
24424 break;
24425 case 4:
24426 default:
24427 reloc = BFD_RELOC_32;
24428 break;
24429 case 8:
24430 reloc = BFD_RELOC_64;
24431 break;
24432 }
24433
24434 #ifdef TE_PE
24435 if (exp->X_op == O_secrel)
24436 {
24437 exp->X_op = O_symbol;
24438 reloc = BFD_RELOC_32_SECREL;
24439 }
24440 #endif
24441
24442 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24443 }
24444
#if defined (OBJ_COFF)
/* COFF hook run over each fixup before relocations are emitted.

   If the destination of the branch is a defined symbol which does not have
   the THUMB_FUNC attribute, then we must be calling a function which has
   the (interfacearm) attribute.  We look for the Thumb entry point to that
   function and change the branch to refer to that function instead.  */
void
arm_validate_fix (fixS * fixP)
{
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      /* Redirect the branch to the Thumb entry point symbol.  */
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif
24462
24463
/* Return non-zero if the fixup FIXP must be kept as a relocation for
   the linker rather than resolved by the assembler.  The order of the
   checks below is significant: the "resolve locally" immediate cases
   must be tested before the group-relocation and function-symbol
   cases.  */
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* RVA relocations are always emitted for the PE linker.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible. */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* ARM-mode branch (or Thumb BLX) targeting a Thumb function:
	 the linker must handle the state change.  */
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Thumb-mode branch (or ARM BLX) targeting an ARM function.  */
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved. */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
24535
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return TRUE if the fixup FIXP may be adjusted to be against a
   section symbol, FALSE if the original symbol must be kept.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    /* No symbol: nothing to preserve.  (Was "return 1"; use TRUE for
       consistency with the other bfd_boolean returns below.)  */
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Keep Thumb function symbols so interworking stubs can be made.  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24612
24613 #ifdef OBJ_ELF
24614 const char *
24615 elf32_arm_target_format (void)
24616 {
24617 #ifdef TE_SYMBIAN
24618 return (target_big_endian
24619 ? "elf32-bigarm-symbian"
24620 : "elf32-littlearm-symbian");
24621 #elif defined (TE_VXWORKS)
24622 return (target_big_endian
24623 ? "elf32-bigarm-vxworks"
24624 : "elf32-littlearm-vxworks");
24625 #elif defined (TE_NACL)
24626 return (target_big_endian
24627 ? "elf32-bigarm-nacl"
24628 : "elf32-littlearm-nacl");
24629 #else
24630 if (target_big_endian)
24631 return "elf32-bigarm";
24632 else
24633 return "elf32-littlearm";
24634 #endif
24635 }
24636
/* Per-symbol write-time hook: simply forward to the generic ELF
   symbol frobbing code.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
24643 #endif
24644
24645 /* MD interface: Finalization. */
24646
24647 void
24648 arm_cleanup (void)
24649 {
24650 literal_pool * pool;
24651
24652 /* Ensure that all the IT blocks are properly closed. */
24653 check_it_blocks_finished ();
24654
24655 for (pool = list_of_pools; pool; pool = pool->next)
24656 {
24657 /* Put it at the end of the relevant section. */
24658 subseg_set (pool->section, pool->sub_section);
24659 #ifdef OBJ_ELF
24660 arm_elf_change_section ();
24661 #endif
24662 s_ltorg (0);
24663 }
24664 }
24665
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from arm_adjust_symtab.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; decide whether it is
	 redundant by scanning forward over empty frags.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
24732
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is done via storage classes; for ELF via
   the symbol's st_info / branch-type annotation.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all n_flags bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24814
24815 /* MD interface: Initialization. */
24816
24817 static void
24818 set_constant_flonums (void)
24819 {
24820 int i;
24821
24822 for (i = 0; i < NUM_FLOAT_VALS; i++)
24823 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24824 abort ();
24825 }
24826
24827 /* Auto-select Thumb mode if it's the only available instruction set for the
24828 given architecture. */
24829
24830 static void
24831 autoselect_thumb_from_cpu_variant (void)
24832 {
24833 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24834 opcode_select (16);
24835 }
24836
/* MD interface hook: one-time target initialization.  Builds the
   opcode/operand lookup tables, resolves the CPU/FPU selection from
   the command line, and records the object-file flags and machine
   type in the output BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate the hash tables used for mnemonic, condition, shift,
     PSR, register, relocation-modifier and barrier-option lookup.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the hash tables from the static description tables.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selected: fall back to a default.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Extension coprocessors take
     precedence over plain architecture versions.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25063
25064 /* Command line processing. */
25065
25066 /* md_parse_option
25067 Invocation line includes a switch not recognized by the base assembler.
25068 See if it's a processor-specific option.
25069
25070 This routine is somewhat complicated by the need for backwards
25071 compatibility (since older releases of gcc can't be changed).
25072 The new options try to make the interface as compatible as
25073 possible with GCC.
25074
25075 New options (supported) are:
25076
25077 -mcpu=<cpu name> Assemble for selected processor
25078 -march=<architecture name> Assemble for selected architecture
25079 -mfpu=<fpu architecture> Assemble for selected FPU.
25080 -EB/-mbig-endian Big-endian
25081 -EL/-mlittle-endian Little-endian
25082 -k Generate PIC code
25083 -mthumb Start in Thumb mode
25084 -mthumb-interwork Code supports ARM/Thumb interworking
25085
25086 -m[no-]warn-deprecated Warn about deprecated features
25087 -m[no-]warn-syms Warn when symbols match instructions
25088
25089 For now we will also provide support for:
25090
25091 -mapcs-32 32-bit Program counter
25092 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
25094 -mapcs-reentrant Reentrant code
25095 -matpcs
25096 (sometime these will probably be replaced with -mapcs=<list of options>
25097 and -matpcs=<list of options>)
25098
   The remaining options are only supported for backwards compatibility.
25100 Cpu variants, the arm part is optional:
25101 -m[arm]1 Currently not supported.
25102 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25103 -m[arm]3 Arm 3 processor
25104 -m[arm]6[xx], Arm 6 processors
25105 -m[arm]7[xx][t][[d]m] Arm 7 processors
25106 -m[arm]8[10] Arm 8 processors
25107 -m[arm]9[20][tdmi] Arm 9 processors
25108 -mstrongarm[110[0]] StrongARM processors
25109 -mxscale XScale processors
25110 -m[arm]v[2345[t[e]]] Arm architectures
25111 -mall All (except the ARM1)
25112 FP variants:
25113 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25114 -mfpe-old (No float load/store multiples)
25115 -mvfpxd VFP Single precision
25116 -mvfp All VFP
25117 -mno-fpu Disable all floating point instructions
25118
25119 The following CPU names are recognized:
25120 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25121 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25122 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25123 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25124 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25125 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25126 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25127
25128 */
25129
/* Short options understood by the target: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Codes for the long options below.  Endianness options are only
   defined when the configuration supports the corresponding byte
   order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options understood by the target.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
25158
/* Description of a simple boolean command-line option: matching the
   option sets *VAR to VALUE.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.	*/
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25167
25168 struct arm_option_table arm_opts[] =
25169 {
25170 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25171 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25172 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25173 &support_interwork, 1, NULL},
25174 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25175 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25176 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25177 1, NULL},
25178 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25179 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25180 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25181 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25182 NULL},
25183
25184 /* These are recognized by the assembler, but have no affect on code. */
25185 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25186 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25187
25188 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25189 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25190 &warn_on_deprecated, 0, NULL},
25191 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25192 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25193 {NULL, NULL, NULL, 0, NULL}
25194 };
25195
/* Description of a legacy CPU/FPU selection option: matching the
   option points *VAR at VALUE and prints the DEPRECATED hint.  */
struct arm_legacy_option_table
{
  const char *option;			/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.	*/
  const arm_feature_set value;		/* What to change it to.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
25203
/* Legacy -m<cpu>/-m<arch>/-m<fpu> options, each mapped to a feature
   set plus a deprecation message naming the modern replacement.
   Terminated by a NULL entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25316
/* One -mcpu= option: the CPU name, the feature set it implies, and the
   FPU assumed when the user does not give an explicit -mfpu=.  */
struct arm_cpu_option_table
{
  /* CPU name as accepted on the command line (lower case).  */
  const char *name;
  /* strlen (name), precomputed by the ARM_CPU_OPT macro so lookups can
     compare length before content.  */
  size_t name_len;
  /* Architectural feature set implied by this CPU.  */
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25329
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Lookup (in arm_parse_cpu) is a linear first-match scan, so entries need
   unique names but no particular order.  The third column seeds the
   Tag_CPU_name build attribute via the canonical-name rule above.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
	       "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
	       "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
					 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
					 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
					 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A73"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
					 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
					 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
					 FPU_ARCH_VFP_V3D16,
								  "Cortex-R8"),
  /* v8-M Mainline with DSP extension; the Cortex-M33 has no mandatory FPU,
     so the user must supply -mfpu= explicitly if needed.  */
  ARM_CPU_OPT ("cortex-m33",	ARM_ARCH_V8M_MAIN_DSP,
					 FPU_NONE,	  "Cortex-M33"),
  ARM_CPU_OPT ("cortex-m23",	ARM_ARCH_V8M_BASE,
					 FPU_NONE,	  "Cortex-M23"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("falkor",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "Falkor"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "APM X-Gene 2"),

  /* Sentinel: arm_parse_cpu stops at the NULL name.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25509
/* One -march= option: the architecture name, its feature set, and the FPU
   assumed when no explicit -mfpu= is given.  */
struct arm_arch_option_table
{
  /* Architecture name as accepted on the command line.  */
  const char *name;
  /* strlen (name), precomputed by the ARM_ARCH_OPT macro.  */
  size_t name_len;
  /* Feature set implied by this architecture.  */
  const arm_feature_set value;
  /* Default FPU architecture for this -march= setting.  */
  const arm_feature_set default_fpu;
};
25517
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Looked up by arm_parse_arch with a linear first-match scan; entry order
   is therefore not significant.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  /* From ARMv5 on the default FPU is VFP rather than FPA.  */
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: arm_parse_arch stops at the NULL name.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25579
/* ISA extensions in the co-processor and main instruction set space.  */
/* One "+ext"/"+noext" suffix usable after -mcpu=/-march=: the feature bits
   merged in when the extension is enabled and cleared when it is disabled,
   plus the architectures it may be applied to.  */
struct arm_option_extension_value_table
{
  /* Extension name without the leading '+' or "no".  */
  const char *name;
  /* strlen (name), precomputed by ARM_EXT_OPT/ARM_EXT_OPT2.  */
  size_t name_len;
  /* Features added by "+name".  */
  const arm_feature_set merge_value;
  /* Features removed by "+noname" (may differ from merge_value).  */
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
25592
/* The following table must be in alphabetical order with a NULL last entry.
   */
/* The alphabetical order is load-bearing: arm_parse_extension resumes its
   scan after the previous match, so out-of-order extensions are rejected.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  /* Two allowed-arch entries: idiv applies to both v7-A and v7-R.  */
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
25654
/* ISA floating-point and Advanced SIMD extensions.  */
/* One -mfpu= option: the FPU name and the coprocessor feature set it
   selects.  */
struct arm_option_fpu_value_table
{
  /* FPU name as accepted on the command line.  */
  const char *name;
  /* Feature set selected by this FPU.  */
  const arm_feature_set value;
};
25661
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
/* Looked up by arm_parse_fpu with an exact-match linear scan (streq), so
   order is not significant.  Several entries are CPU-named aliases kept for
   historical command lines (arm1020t, arm1136jfs, ...).  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel: arm_parse_fpu stops at the NULL name.  */
  {NULL,		ARM_ARCH_NONE}
};
25711
/* Generic name -> integer value pair, used for the float-ABI and EABI
   option tables below.  */
struct arm_option_value_table
{
  /* Option value name as accepted on the command line.  */
  const char *name;
  /* Value stored when this name is selected.  */
  long value;
};
25717
/* -mfloat-abi= values, consumed by arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  /* Sentinel.  */
  {NULL,	0}
};
25725
25726 #ifdef OBJ_ELF
25727 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* -meabi= values, consumed by arm_parse_eabi; values become ELF header
   flags.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  /* Sentinel.  */
  {NULL,	0}
};
25735 #endif
25736
/* A multi-character option with its own sub-option parser, matched by
   prefix in md_parse_option (e.g. "mcpu=" matches "-mcpu=cortex-m33").  */
struct arm_long_option_table
{
  const char * option;		/* Substring to match.  */
  const char * help;		/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25744
/* Parse STR, a string of '+'-separated architectural extensions such as
   "+crc+nofp", and apply each one to the feature set that *OPT_P points at.
   On entry *OPT_P is the base CPU/architecture feature set; it is redirected
   to a freshly allocated modifiable copy before parsing starts.  Returns
   TRUE on success; on any error issues as_bad and returns FALSE (with
   *OPT_P already pointing at the partially updated copy).  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set **opt_p)
{
  /* Heap copy of the caller's feature set; never freed — the caller keeps
     referring to it for the rest of the run.  */
  arm_feature_set *ext_set = XNEW (arm_feature_set);

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN covers just this extension name; EXT (if any) is the rest.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix disables the extension.  Switching from adding to
	 removing restarts the table scan from the top.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      /* Once a "no" extension has been seen, plain additions are no
		 longer allowed.  */
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      /* "+", "+no" with nothing after them.  */
      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  Note
	 that OPT resumes where the previous iteration stopped, which is what
	 enforces the alphabetical-order requirement.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  A second,
	     full-table scan distinguishes the two for the diagnostic.  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
25877
25878 static bfd_boolean
25879 arm_parse_cpu (const char *str)
25880 {
25881 const struct arm_cpu_option_table *opt;
25882 const char *ext = strchr (str, '+');
25883 size_t len;
25884
25885 if (ext != NULL)
25886 len = ext - str;
25887 else
25888 len = strlen (str);
25889
25890 if (len == 0)
25891 {
25892 as_bad (_("missing cpu name `%s'"), str);
25893 return FALSE;
25894 }
25895
25896 for (opt = arm_cpus; opt->name != NULL; opt++)
25897 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25898 {
25899 mcpu_cpu_opt = &opt->value;
25900 mcpu_fpu_opt = &opt->default_fpu;
25901 if (opt->canonical_name)
25902 {
25903 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25904 strcpy (selected_cpu_name, opt->canonical_name);
25905 }
25906 else
25907 {
25908 size_t i;
25909
25910 if (len >= sizeof selected_cpu_name)
25911 len = (sizeof selected_cpu_name) - 1;
25912
25913 for (i = 0; i < len; i++)
25914 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25915 selected_cpu_name[i] = 0;
25916 }
25917
25918 if (ext != NULL)
25919 return arm_parse_extension (ext, &mcpu_cpu_opt);
25920
25921 return TRUE;
25922 }
25923
25924 as_bad (_("unknown cpu `%s'"), str);
25925 return FALSE;
25926 }
25927
25928 static bfd_boolean
25929 arm_parse_arch (const char *str)
25930 {
25931 const struct arm_arch_option_table *opt;
25932 const char *ext = strchr (str, '+');
25933 size_t len;
25934
25935 if (ext != NULL)
25936 len = ext - str;
25937 else
25938 len = strlen (str);
25939
25940 if (len == 0)
25941 {
25942 as_bad (_("missing architecture name `%s'"), str);
25943 return FALSE;
25944 }
25945
25946 for (opt = arm_archs; opt->name != NULL; opt++)
25947 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25948 {
25949 march_cpu_opt = &opt->value;
25950 march_fpu_opt = &opt->default_fpu;
25951 strcpy (selected_cpu_name, opt->name);
25952
25953 if (ext != NULL)
25954 return arm_parse_extension (ext, &march_cpu_opt);
25955
25956 return TRUE;
25957 }
25958
25959 as_bad (_("unknown architecture `%s'\n"), str);
25960 return FALSE;
25961 }
25962
25963 static bfd_boolean
25964 arm_parse_fpu (const char * str)
25965 {
25966 const struct arm_option_fpu_value_table * opt;
25967
25968 for (opt = arm_fpus; opt->name != NULL; opt++)
25969 if (streq (opt->name, str))
25970 {
25971 mfpu_opt = &opt->value;
25972 return TRUE;
25973 }
25974
25975 as_bad (_("unknown floating point format `%s'\n"), str);
25976 return FALSE;
25977 }
25978
25979 static bfd_boolean
25980 arm_parse_float_abi (const char * str)
25981 {
25982 const struct arm_option_value_table * opt;
25983
25984 for (opt = arm_float_abis; opt->name != NULL; opt++)
25985 if (streq (opt->name, str))
25986 {
25987 mfloat_abi_opt = opt->value;
25988 return TRUE;
25989 }
25990
25991 as_bad (_("unknown floating point abi `%s'\n"), str);
25992 return FALSE;
25993 }
25994
25995 #ifdef OBJ_ELF
25996 static bfd_boolean
25997 arm_parse_eabi (const char * str)
25998 {
25999 const struct arm_option_value_table *opt;
26000
26001 for (opt = arm_eabis; opt->name != NULL; opt++)
26002 if (streq (opt->name, str))
26003 {
26004 meabi_flags = opt->value;
26005 return TRUE;
26006 }
26007 as_bad (_("unknown EABI `%s'\n"), str);
26008 return FALSE;
26009 }
26010 #endif
26011
26012 static bfd_boolean
26013 arm_parse_it_mode (const char * str)
26014 {
26015 bfd_boolean ret = TRUE;
26016
26017 if (streq ("arm", str))
26018 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26019 else if (streq ("thumb", str))
26020 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26021 else if (streq ("always", str))
26022 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26023 else if (streq ("never", str))
26024 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26025 else
26026 {
26027 as_bad (_("unknown implicit IT mode `%s', should be "\
26028 "arm, thumb, always, or never."), str);
26029 ret = FALSE;
26030 }
26031
26032 return ret;
26033 }
26034
26035 static bfd_boolean
26036 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26037 {
26038 codecomposer_syntax = TRUE;
26039 arm_comment_chars[0] = ';';
26040 arm_line_separator_chars[0] = 0;
26041 return TRUE;
26042 }
26043
/* Table of the multi-character "-mXXX=" options, matched by prefix in
   md_parse_option and dispatched to the arm_parse_* handlers above.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  /* Sentinel.  */
  {NULL, NULL, 0, NULL}
};
26064
/* GAS back-end hook: handle target-specific command-line option C with
   argument ARG (may be NULL).  Returns 1 if the option was consumed, 0 if
   it is not an ARM option.  Options are tried in order against the simple
   arm_opts table, the legacy arm_legacy_opts table, and finally the
   prefix-matched arm_long_opts table.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: match "-cREST" exactly against the table's
	 option string.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above, but the value stored is the address
	 of a feature set rather than a plain integer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading '-' and
		 the option letter C, so "option + strlen - 1" lands just
		 past the matched prefix, on the sub-option value.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      /* Not an ARM-specific option.  */
      return 0;
    }

  return 1;
}
26155
26156 void
26157 md_show_usage (FILE * fp)
26158 {
26159 struct arm_option_table *opt;
26160 struct arm_long_option_table *lopt;
26161
26162 fprintf (fp, _(" ARM-specific assembler options:\n"));
26163
26164 for (opt = arm_opts; opt->option != NULL; opt++)
26165 if (opt->help != NULL)
26166 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26167
26168 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26169 if (lopt->help != NULL)
26170 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26171
26172 #ifdef OPTION_EB
26173 fprintf (fp, _("\
26174 -EB assemble code for a big-endian cpu\n"));
26175 #endif
26176
26177 #ifdef OPTION_EL
26178 fprintf (fp, _("\
26179 -EL assemble code for a little-endian cpu\n"));
26180 #endif
26181
26182 fprintf (fp, _("\
26183 --fix-v4bx Allow BX in ARMv4 code\n"));
26184 }
26185
26186
26187 #ifdef OBJ_ELF
/* Associates an EABI Tag_CPU_arch attribute value (VAL) with the
   feature set (FLAGS) that architecture level provides.  Used by the
   cpu_arch_ver[] table below to map used features to an arch tag.  */
typedef struct
{
  int val;			/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features available at this level.  */
} cpu_arch_ver_table;
26193
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  The scan in
   aeabi_set_public_attributes keeps the LAST entry that contributes a new
   feature, so deliberately out-of-order entries below (V6K before V6Z,
   V6T2 after V6-M, V8-A before V8-M) control which arch wins a tie.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {0, ARM_ARCH_NONE}		/* val == 0 terminates the scan.  */
};
26219
26220 /* Set an attribute if it has not already been set by the user. */
26221 static void
26222 aeabi_set_attribute_int (int tag, int value)
26223 {
26224 if (tag < 1
26225 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26226 || !attributes_set_explicitly[tag])
26227 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26228 }
26229
26230 static void
26231 aeabi_set_attribute_string (int tag, const char *value)
26232 {
26233 if (tag < 1
26234 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26235 || !attributes_set_explicitly[tag])
26236 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26237 }
26238
/* Set the public EABI object attributes.  Computes the union of the
   features actually used by the assembled code, the selected CPU and the
   selected FPU, maps that set to an EABI Tag_CPU_arch value via
   cpu_arch_ver[], and records the standard build attributes
   (Tag_CPU_name, Tag_CPU_arch, profile, ISA use, VFP/SIMD arch, DIV use,
   virtualization, ...) for the .ARM.attributes section.  Attributes the
   user set explicitly are preserved by aeabi_set_attribute_*.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set arm_arch = ARM_ARCH_NONE;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Any ARM-state code implies at least the v1 instruction set...  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  /* ...and any Thumb-state code implies at least v4T.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Scan cpu_arch_ver[]: the last entry that contributes a feature we use
     determines the arch tag.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  arm_arch = p->flags;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    {
      arch = TAG_CPU_ARCH_V7E_M;
      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
    }

  /* Any feature beyond the v8-M baseline set upgrades the tag to
     v8-M mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8M_MAIN;
      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
    }

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8;
      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armvN" names are reported without the "armv" prefix and
	 upper-cased.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
    {
      arm_feature_set ext;

      /* DSP instructions not in architecture.  */
      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
	aeabi_set_attribute_int (Tag_DSP_extension, 1);
    }

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26486
26487 /* Add the default contents for the .ARM.attributes section. */
26488 void
26489 arm_md_end (void)
26490 {
26491 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26492 return;
26493
26494 aeabi_set_public_attributes ();
26495 }
26496 #endif /* OBJ_ELF */
26497
26498
/* Parse a .cpu directive.  The name token is temporarily NUL-terminated
   in place in the input buffer; the saved character is restored on every
   exit path.  On success the selected CPU, its display name and the
   cpu_variant feature set are updated.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Isolate the CPU name token.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	/* Record the name used later for the Tag_CPU_name attribute:
	   prefer the table's canonical spelling, otherwise upper-case
	   the table name.  */
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
26539
26540
26541 /* Parse a .arch directive. */
26542
26543 static void
26544 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26545 {
26546 const struct arm_arch_option_table *opt;
26547 char saved_char;
26548 char *name;
26549
26550 name = input_line_pointer;
26551 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26552 input_line_pointer++;
26553 saved_char = *input_line_pointer;
26554 *input_line_pointer = 0;
26555
26556 /* Skip the first "all" entry. */
26557 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26558 if (streq (opt->name, name))
26559 {
26560 mcpu_cpu_opt = &opt->value;
26561 selected_cpu = opt->value;
26562 strcpy (selected_cpu_name, opt->name);
26563 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26564 *input_line_pointer = saved_char;
26565 demand_empty_rest_of_line ();
26566 return;
26567 }
26568
26569 as_bad (_("unknown architecture `%s'\n"), name);
26570 *input_line_pointer = saved_char;
26571 ignore_rest_of_line ();
26572 }
26573
26574
26575 /* Parse a .object_arch directive. */
26576
26577 static void
26578 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26579 {
26580 const struct arm_arch_option_table *opt;
26581 char saved_char;
26582 char *name;
26583
26584 name = input_line_pointer;
26585 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26586 input_line_pointer++;
26587 saved_char = *input_line_pointer;
26588 *input_line_pointer = 0;
26589
26590 /* Skip the first "all" entry. */
26591 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26592 if (streq (opt->name, name))
26593 {
26594 object_arch = &opt->value;
26595 *input_line_pointer = saved_char;
26596 demand_empty_rest_of_line ();
26597 return;
26598 }
26599
26600 as_bad (_("unknown architecture `%s'\n"), name);
26601 *input_line_pointer = saved_char;
26602 ignore_rest_of_line ();
26603 }
26604
26605 /* Parse a .arch_extension directive. */
26606
26607 static void
26608 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26609 {
26610 const struct arm_option_extension_value_table *opt;
26611 const arm_feature_set arm_any = ARM_ANY;
26612 char saved_char;
26613 char *name;
26614 int adding_value = 1;
26615
26616 name = input_line_pointer;
26617 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26618 input_line_pointer++;
26619 saved_char = *input_line_pointer;
26620 *input_line_pointer = 0;
26621
26622 if (strlen (name) >= 2
26623 && strncmp (name, "no", 2) == 0)
26624 {
26625 adding_value = 0;
26626 name += 2;
26627 }
26628
26629 for (opt = arm_extensions; opt->name != NULL; opt++)
26630 if (streq (opt->name, name))
26631 {
26632 int i, nb_allowed_archs =
26633 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
26634 for (i = 0; i < nb_allowed_archs; i++)
26635 {
26636 /* Empty entry. */
26637 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26638 continue;
26639 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26640 break;
26641 }
26642
26643 if (i == nb_allowed_archs)
26644 {
26645 as_bad (_("architectural extension `%s' is not allowed for the "
26646 "current base architecture"), name);
26647 break;
26648 }
26649
26650 if (adding_value)
26651 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26652 opt->merge_value);
26653 else
26654 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26655
26656 mcpu_cpu_opt = &selected_cpu;
26657 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26658 *input_line_pointer = saved_char;
26659 demand_empty_rest_of_line ();
26660 return;
26661 }
26662
26663 if (opt->name == NULL)
26664 as_bad (_("unknown architecture extension `%s'\n"), name);
26665
26666 *input_line_pointer = saved_char;
26667 ignore_rest_of_line ();
26668 }
26669
26670 /* Parse a .fpu directive. */
26671
26672 static void
26673 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26674 {
26675 const struct arm_option_fpu_value_table *opt;
26676 char saved_char;
26677 char *name;
26678
26679 name = input_line_pointer;
26680 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26681 input_line_pointer++;
26682 saved_char = *input_line_pointer;
26683 *input_line_pointer = 0;
26684
26685 for (opt = arm_fpus; opt->name != NULL; opt++)
26686 if (streq (opt->name, name))
26687 {
26688 mfpu_opt = &opt->value;
26689 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26690 *input_line_pointer = saved_char;
26691 demand_empty_rest_of_line ();
26692 return;
26693 }
26694
26695 as_bad (_("unknown floating point format `%s'\n"), name);
26696 *input_line_pointer = saved_char;
26697 ignore_rest_of_line ();
26698 }
26699
/* Copy symbol information.  Propagates the per-symbol flag word exposed
   by ARM_GET_FLAG from SRC to DEST so that aliased or cloned symbols
   keep the same ARM-specific annotations.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26707
26708 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used to resolve names such
   as "Tag_CPU_arch" in .eabi_attribute directives via a linear scan of
   the table below.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
  {
    /* When you modify this table you should
       also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
    T (Tag_CPU_raw_name),
    T (Tag_CPU_name),
    T (Tag_CPU_arch),
    T (Tag_CPU_arch_profile),
    T (Tag_ARM_ISA_use),
    T (Tag_THUMB_ISA_use),
    T (Tag_FP_arch),
    T (Tag_VFP_arch),
    T (Tag_WMMX_arch),
    T (Tag_Advanced_SIMD_arch),
    T (Tag_PCS_config),
    T (Tag_ABI_PCS_R9_use),
    T (Tag_ABI_PCS_RW_data),
    T (Tag_ABI_PCS_RO_data),
    T (Tag_ABI_PCS_GOT_use),
    T (Tag_ABI_PCS_wchar_t),
    T (Tag_ABI_FP_rounding),
    T (Tag_ABI_FP_denormal),
    T (Tag_ABI_FP_exceptions),
    T (Tag_ABI_FP_user_exceptions),
    T (Tag_ABI_FP_number_model),
    T (Tag_ABI_align_needed),
    T (Tag_ABI_align8_needed),
    T (Tag_ABI_align_preserved),
    T (Tag_ABI_align8_preserved),
    T (Tag_ABI_enum_size),
    T (Tag_ABI_HardFP_use),
    T (Tag_ABI_VFP_args),
    T (Tag_ABI_WMMX_args),
    T (Tag_ABI_optimization_goals),
    T (Tag_ABI_FP_optimization_goals),
    T (Tag_compatibility),
    T (Tag_CPU_unaligned_access),
    T (Tag_FP_HP_extension),
    T (Tag_VFP_HP_extension),
    T (Tag_ABI_FP_16bit_format),
    T (Tag_MPextension_use),
    T (Tag_DIV_use),
    T (Tag_nodefaults),
    T (Tag_also_compatible_with),
    T (Tag_conformance),
    T (Tag_T2EE_use),
    T (Tag_Virtualization_use),
    T (Tag_DSP_extension),
    /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
  };
  unsigned int i;

  if (name == NULL)
    return -1;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26783
26784
26785 /* Apply sym value for relocations only in the case that they are for
26786 local symbols in the same segment as the fixup and you have the
26787 respective architectural feature for blx and simple switches. */
26788 int
26789 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26790 {
26791 if (fixP->fx_addsy
26792 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26793 /* PR 17444: If the local symbol is in a different section then a reloc
26794 will always be generated for it, so applying the symbol value now
26795 will result in a double offset being stored in the relocation. */
26796 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26797 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26798 {
26799 switch (fixP->fx_r_type)
26800 {
26801 case BFD_RELOC_ARM_PCREL_BLX:
26802 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26803 if (ARM_IS_FUNC (fixP->fx_addsy))
26804 return 1;
26805 break;
26806
26807 case BFD_RELOC_ARM_PCREL_CALL:
26808 case BFD_RELOC_THUMB_PCREL_BLX:
26809 if (THUMB_IS_FUNC (fixP->fx_addsy))
26810 return 1;
26811 break;
26812
26813 default:
26814 break;
26815 }
26816
26817 }
26818 return 0;
26819 }
26820 #endif /* OBJ_ELF */