[ARM] Add ARMv8.3 command line option and feature flag
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Symbol marking the start of the function being unwound.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table
     -- NOTE(review): set by unwind directives elsewhere in this file.  */
  symbolS * table_entry;
  /* Explicitly specified personality routine, if any.  */
  symbolS * personality_routine;
  /* Index of the selected standard personality routine
     -- NOTE(review): sentinel/encoding established elsewhere; confirm.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes of OPCODES in use, and the current allocated size.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
typedef enum
{
  /* Operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* Parse failed; other interpretations may still be attempted.  */
  PARSE_OPERAND_FAIL,
  /* Parse failed in a way that rules out retrying (no backtracking).  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
88
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,	/* FP arguments/results passed in FP registers.  */
  ARM_FLOAT_ABI_SOFTFP,	/* Soft-float calling convention, FP insns allowed.  */
  ARM_FLOAT_ABI_SOFT	/* Pure software floating point.  */
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
/* Feature bits of the CPU/FPU currently being assembled for.  */
static arm_feature_set cpu_variant;
/* Feature bits actually used by the ARM (resp. Thumb) code seen so far
   -- presumably consumed when emitting object attributes; confirm.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26 = FALSE;
static int atpcs = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float = FALSE;
static int pic_code = FALSE;
static int fix_v4bx = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
/* Selected by legacy option flags, if any.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* CPU and implied FPU selected by -mcpu= (presumably; set in option
   parsing elsewhere in this file).  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
/* Architecture and implied FPU selected by -march=.  */
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
/* FPU selected by -mfpu=.  */
static const arm_feature_set *mfpu_opt = NULL;
/* Architecture recorded for the object file, when it differs from the
   one assembled for -- NOTE(review): set elsewhere; confirm.  */
static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 #ifdef OBJ_ELF
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 #endif
167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168
169 #ifdef CPU_DEFAULT
170 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 #endif
172
173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v4t_5 =
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
191 static const arm_feature_set arm_ext_v6_notm =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
193 static const arm_feature_set arm_ext_v6_dsp =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
195 static const arm_feature_set arm_ext_barrier =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
197 static const arm_feature_set arm_ext_msr =
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
203 #ifdef OBJ_ELF
204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
205 #endif
206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
207 static const arm_feature_set arm_ext_m =
208 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
209 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
217 static const arm_feature_set arm_ext_v8m_main =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
222 static const arm_feature_set arm_ext_v6t2_v8m =
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics =
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
227 #ifdef OBJ_ELF
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp =
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
231 #endif
232 static const arm_feature_set arm_ext_ras =
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16 =
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
237
238 static const arm_feature_set arm_arch_any = ARM_ANY;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
242 #ifdef OBJ_ELF
243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
244 #endif
245
246 static const arm_feature_set arm_cext_iwmmxt2 =
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
248 static const arm_feature_set arm_cext_iwmmxt =
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
250 static const arm_feature_set arm_cext_xscale =
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
252 static const arm_feature_set arm_cext_maverick =
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
254 static const arm_feature_set fpu_fpa_ext_v1 =
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
256 static const arm_feature_set fpu_fpa_ext_v2 =
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
258 static const arm_feature_set fpu_vfp_ext_v1xd =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
260 static const arm_feature_set fpu_vfp_ext_v1 =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
262 static const arm_feature_set fpu_vfp_ext_v2 =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
264 static const arm_feature_set fpu_vfp_ext_v3xd =
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
266 static const arm_feature_set fpu_vfp_ext_v3 =
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
268 static const arm_feature_set fpu_vfp_ext_d32 =
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
270 static const arm_feature_set fpu_neon_ext_v1 =
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
274 #ifdef OBJ_ELF
275 static const arm_feature_set fpu_vfp_fp16 =
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
277 static const arm_feature_set fpu_neon_ext_fma =
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
279 #endif
280 static const arm_feature_set fpu_vfp_ext_fma =
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
282 static const arm_feature_set fpu_vfp_ext_armv8 =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
284 static const arm_feature_set fpu_vfp_ext_armv8xd =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
286 static const arm_feature_set fpu_neon_ext_armv8 =
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
288 static const arm_feature_set fpu_crypto_ext_armv8 =
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
290 static const arm_feature_set crc_ext_armv8 =
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
292 static const arm_feature_set fpu_neon_ext_v8_1 =
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
294
295 static int mfloat_abi_opt = -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name[20];
300
301 extern FLONUM_TYPE generic_floating_point_number;
302
303 /* Return if no cpu was selected on command-line. */
304 static bfd_boolean
305 no_cpu_selected (void)
306 {
307 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
308 }
309
310 #ifdef OBJ_ELF
311 # ifdef EABI_DEFAULT
312 static int meabi_flags = EABI_DEFAULT;
313 # else
314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
315 # endif
316
317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
318
319 bfd_boolean
320 arm_is_eabi (void)
321 {
322 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
323 }
324 #endif
325
326 #ifdef OBJ_ELF
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS * GOT_symbol;
329 #endif
330
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
334 instructions. */
335 static int thumb_mode = 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
340
341 /* Specifies the intrinsic IT insn behavior mode. */
342 enum implicit_it_mode
343 {
344 IMPLICIT_IT_MODE_NEVER = 0x00,
345 IMPLICIT_IT_MODE_ARM = 0x01,
346 IMPLICIT_IT_MODE_THUMB = 0x02,
347 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
348 };
349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
350
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
353
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
358 there.)
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
361 machine code.
362
363 Important differences from the old Thumb mode:
364
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
373
374 static bfd_boolean unified_syntax = FALSE;
375
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars[] = "#[]{}";
381
382 enum neon_el_type
383 {
384 NT_invtype,
385 NT_untyped,
386 NT_integer,
387 NT_float,
388 NT_poly,
389 NT_signed,
390 NT_unsigned
391 };
392
393 struct neon_type_el
394 {
395 enum neon_el_type type;
396 unsigned size;
397 };
398
399 #define NEON_MAX_TYPE_ELS 4
400
401 struct neon_type
402 {
403 struct neon_type_el el[NEON_MAX_TYPE_ELS];
404 unsigned elems;
405 };
406
407 enum it_instruction_type
408 {
409 OUTSIDE_IT_INSN,
410 INSIDE_IT_INSN,
411 INSIDE_IT_LAST_INSN,
412 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN /* The IT insn has been parsed. */
417 };
418
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
421
/* Fully-parsed state of the instruction currently being assembled:
   fields derived from the mnemonic, relocation information, and
   per-operand details.  */
struct arm_it
{
  /* Diagnostic to report, or NULL if parsing/encoding succeeded.  */
  const char * error;
  /* The encoded instruction value, built up during assembly.  */
  unsigned long instruction;
  /* Instruction size in bytes.  */
  int size;
  /* Size demanded by a width suffix, if any -- NOTE(review): exact
     encoding of this field is established by the parser elsewhere.  */
  int size_req;
  /* Condition code for this instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Neon type suffixes parsed from the mnemonic.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to be emitted for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* Position of this instruction relative to an IT block.  */
  enum it_instruction_type it_insn_type;

  /* One record per parsed operand.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;  /* Operand present.  */
    unsigned isreg : 1;  /* Operand was a register.  */
    unsigned immisreg : 1;  /* .imm field is a second register.  */
    unsigned isscalar : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;  /* Operand has relocation suffix.  */
    unsigned writeback : 1;  /* Operand has trailing !  */
    unsigned preind : 1;  /* Preindexed address.  */
    unsigned postind : 1;  /* Postindexed address.  */
    unsigned negative : 1;  /* Index register was negated.  */
    unsigned shifted : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
475
476 static struct arm_it inst;
477
478 #define NUM_FLOAT_VALS 8
479
480 const char * fp_const[] =
481 {
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
483 };
484
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
487
488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
489
490 #define FAIL (-1)
491 #define SUCCESS (0)
492
493 #define SUFF_S 1
494 #define SUFF_D 2
495 #define SUFF_E 3
496 #define SUFF_P 4
497
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
500
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
503
504 #define DOUBLE_LOAD_FLAG 0x00000001
505
506 struct asm_cond
507 {
508 const char * template_name;
509 unsigned long value;
510 };
511
512 #define COND_ALWAYS 0xE
513
514 struct asm_psr
515 {
516 const char * template_name;
517 unsigned long field;
518 };
519
520 struct asm_barrier_opt
521 {
522 const char * template_name;
523 unsigned long value;
524 const arm_feature_set arch;
525 };
526
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
529
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
535
536 struct reloc_entry
537 {
538 const char * name;
539 bfd_reloc_code_real_type reloc;
540 };
541
542 enum vfp_reg_pos
543 {
544 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
545 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
546 };
547
548 enum vfp_ldstm_type
549 {
550 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
551 };
552
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
556
557 struct neon_typed_alias
558 {
559 unsigned char defined;
560 unsigned char index;
561 struct neon_type_el eltype;
562 };
563
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
566 enum arm_reg_type
567 {
568 REG_TYPE_RN,
569 REG_TYPE_CP,
570 REG_TYPE_CN,
571 REG_TYPE_FN,
572 REG_TYPE_VFS,
573 REG_TYPE_VFD,
574 REG_TYPE_NQ,
575 REG_TYPE_VFSD,
576 REG_TYPE_NDQ,
577 REG_TYPE_NSDQ,
578 REG_TYPE_VFC,
579 REG_TYPE_MVF,
580 REG_TYPE_MVD,
581 REG_TYPE_MVFX,
582 REG_TYPE_MVDX,
583 REG_TYPE_MVAX,
584 REG_TYPE_DSPSC,
585 REG_TYPE_MMXWR,
586 REG_TYPE_MMXWC,
587 REG_TYPE_MMXWCG,
588 REG_TYPE_XSCALE,
589 REG_TYPE_RNB
590 };
591
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
596 struct reg_entry
597 {
598 const char * name;
599 unsigned int number;
600 unsigned char type;
601 unsigned char builtin;
602 struct neon_typed_alias * neon;
603 };
604
605 /* Diagnostics used when we don't get a register of the expected type. */
/* Indexed by enum arm_reg_type -- the entry order here MUST stay in sync
   with that enum.  NOTE(review): there is no entry for REG_TYPE_RNB (the
   last enumerator); presumably it never produces a diagnostic through this
   table -- confirm before indexing with it.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
630
631 /* Some well known registers that we refer to directly elsewhere. */
632 #define REG_R12 12
633 #define REG_SP 13
634 #define REG_LR 14
635 #define REG_PC 15
636
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
638 take 2: */
639 #define INSN_SIZE 4
640
641 struct asm_opcode
642 {
643 /* Basic string to match. */
644 const char * template_name;
645
646 /* Parameters to instruction. */
647 unsigned int operands[8];
648
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag : 4;
651
652 /* Basic instruction code. */
653 unsigned int avalue : 28;
654
655 /* Thumb-format instruction code. */
656 unsigned int tvalue;
657
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set * avariant;
660 const arm_feature_set * tvariant;
661
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode) (void);
664
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode) (void);
667 };
668
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
679
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
684
685 #define T2_SUBS_PC_LR 0xf3de8f00
686
687 #define DATA_OP_SHIFT 21
688 #define SBIT_SHIFT 20
689
690 #define T2_OPCODE_MASK 0xfe1fffff
691 #define T2_DATA_OP_SHIFT 21
692 #define T2_SBIT_SHIFT 20
693
694 #define A_COND_MASK 0xf0000000
695 #define A_PUSH_POP_OP_MASK 0x0fff0000
696
697 /* Opcodes for pushing/poping registers to/from the stack. */
698 #define A1_OPCODE_PUSH 0x092d0000
699 #define A2_OPCODE_PUSH 0x052d0004
700 #define A2_OPCODE_POP 0x049d0004
701
702 /* Codes to distinguish the arithmetic instructions. */
703 #define OPCODE_AND 0
704 #define OPCODE_EOR 1
705 #define OPCODE_SUB 2
706 #define OPCODE_RSB 3
707 #define OPCODE_ADD 4
708 #define OPCODE_ADC 5
709 #define OPCODE_SBC 6
710 #define OPCODE_RSC 7
711 #define OPCODE_TST 8
712 #define OPCODE_TEQ 9
713 #define OPCODE_CMP 10
714 #define OPCODE_CMN 11
715 #define OPCODE_ORR 12
716 #define OPCODE_MOV 13
717 #define OPCODE_BIC 14
718 #define OPCODE_MVN 15
719
720 #define T2_OPCODE_AND 0
721 #define T2_OPCODE_BIC 1
722 #define T2_OPCODE_ORR 2
723 #define T2_OPCODE_ORN 3
724 #define T2_OPCODE_EOR 4
725 #define T2_OPCODE_ADD 8
726 #define T2_OPCODE_ADC 10
727 #define T2_OPCODE_SBC 11
728 #define T2_OPCODE_SUB 13
729 #define T2_OPCODE_RSB 14
730
731 #define T_OPCODE_MUL 0x4340
732 #define T_OPCODE_TST 0x4200
733 #define T_OPCODE_CMN 0x42c0
734 #define T_OPCODE_NEG 0x4240
735 #define T_OPCODE_MVN 0x43c0
736
737 #define T_OPCODE_ADD_R3 0x1800
738 #define T_OPCODE_SUB_R3 0x1a00
739 #define T_OPCODE_ADD_HI 0x4400
740 #define T_OPCODE_ADD_ST 0xb000
741 #define T_OPCODE_SUB_ST 0xb080
742 #define T_OPCODE_ADD_SP 0xa800
743 #define T_OPCODE_ADD_PC 0xa000
744 #define T_OPCODE_ADD_I8 0x3000
745 #define T_OPCODE_SUB_I8 0x3800
746 #define T_OPCODE_ADD_I3 0x1c00
747 #define T_OPCODE_SUB_I3 0x1e00
748
749 #define T_OPCODE_ASR_R 0x4100
750 #define T_OPCODE_LSL_R 0x4080
751 #define T_OPCODE_LSR_R 0x40c0
752 #define T_OPCODE_ROR_R 0x41c0
753 #define T_OPCODE_ASR_I 0x1000
754 #define T_OPCODE_LSL_I 0x0000
755 #define T_OPCODE_LSR_I 0x0800
756
757 #define T_OPCODE_MOV_I8 0x2000
758 #define T_OPCODE_CMP_I8 0x2800
759 #define T_OPCODE_CMP_LR 0x4280
760 #define T_OPCODE_MOV_HR 0x4600
761 #define T_OPCODE_CMP_HR 0x4500
762
763 #define T_OPCODE_LDR_PC 0x4800
764 #define T_OPCODE_LDR_SP 0x9800
765 #define T_OPCODE_STR_SP 0x9000
766 #define T_OPCODE_LDR_IW 0x6800
767 #define T_OPCODE_STR_IW 0x6000
768 #define T_OPCODE_LDR_IH 0x8800
769 #define T_OPCODE_STR_IH 0x8000
770 #define T_OPCODE_LDR_IB 0x7800
771 #define T_OPCODE_STR_IB 0x7000
772 #define T_OPCODE_LDR_RW 0x5800
773 #define T_OPCODE_STR_RW 0x5000
774 #define T_OPCODE_LDR_RH 0x5a00
775 #define T_OPCODE_STR_RH 0x5200
776 #define T_OPCODE_LDR_RB 0x5c00
777 #define T_OPCODE_STR_RB 0x5400
778
779 #define T_OPCODE_PUSH 0xb400
780 #define T_OPCODE_POP 0xbc00
781
782 #define T_OPCODE_BRANCH 0xe000
783
784 #define THUMB_SIZE 2 /* Size of thumb instruction. */
785 #define THUMB_PP_PC_LR 0x0100
786 #define THUMB_LOAD_BIT 0x0800
787 #define THUMB2_LOAD_BIT 0x00100000
788
789 #define BAD_ARGS _("bad arguments to instruction")
790 #define BAD_SP _("r13 not allowed here")
791 #define BAD_PC _("r15 not allowed here")
792 #define BAD_COND _("instruction cannot be conditional")
793 #define BAD_OVERLAP _("registers may not be the same")
794 #define BAD_HIREG _("lo register required")
795 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
796 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
797 #define BAD_BRANCH _("branch must be last instruction in IT block")
798 #define BAD_NOT_IT _("instruction not allowed in IT block")
799 #define BAD_FPU _("selected FPU does not support instruction")
800 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
801 #define BAD_IT_COND _("incorrect condition in IT block")
802 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
803 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
804 #define BAD_PC_ADDRESSING \
805 _("cannot use register index with PC-relative addressing")
806 #define BAD_PC_WRITEBACK \
807 _("cannot use writeback with PC-relative addressing")
808 #define BAD_RANGE _("branch out of range")
809 #define BAD_FP16 _("selected processor does not support fp16 instruction")
810 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
811 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
812
813 static struct hash_control * arm_ops_hsh;
814 static struct hash_control * arm_cond_hsh;
815 static struct hash_control * arm_shift_hsh;
816 static struct hash_control * arm_psr_hsh;
817 static struct hash_control * arm_v7m_psr_hsh;
818 static struct hash_control * arm_reg_hsh;
819 static struct hash_control * arm_reloc_hsh;
820 static struct hash_control * arm_barrier_opt_hsh;
821
822 /* Stuff needed to resolve the label ambiguity
823 As:
824 ...
825 label: <insn>
826 may differ from:
827 ...
828 label:
829 <insn> */
830
831 symbolS * last_label_seen;
832 static int label_is_thumb_function_name = FALSE;
833
834 /* Literal pool structure. Held on a per-section
835 and per-sub-section basis. */
836
837 #define MAX_LITERAL_POOL_SIZE 1024
838 typedef struct literal_pool
839 {
840 expressionS literals [MAX_LITERAL_POOL_SIZE];
841 unsigned int next_free_entry;
842 unsigned int id;
843 symbolS * symbol;
844 segT section;
845 subsegT sub_section;
846 #ifdef OBJ_ELF
847 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
848 #endif
849 struct literal_pool * next;
850 unsigned int alignment;
851 } literal_pool;
852
853 /* Pointer to a linked list of literal pools. */
854 literal_pool * list_of_pools = NULL;
855
856 typedef enum asmfunc_states
857 {
858 OUTSIDE_ASMFUNC,
859 WAITING_ASMFUNC_NAME,
860 WAITING_ENDASMFUNC
861 } asmfunc_states;
862
863 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
864
865 #ifdef OBJ_ELF
866 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
867 #else
868 static struct current_it now_it;
869 #endif
870
871 static inline int
872 now_it_compatible (int cond)
873 {
874 return (cond & ~1) == (now_it.cc & ~1);
875 }
876
877 static inline int
878 conditional_insn (void)
879 {
880 return inst.cond != COND_ALWAYS;
881 }
882
883 static int in_it_block (void);
884
885 static int handle_it_state (void);
886
887 static void force_automatic_it_block_close (void);
888
889 static void it_fsm_post_encode (void);
890
891 #define set_it_insn_type(type) \
892 do \
893 { \
894 inst.it_insn_type = type; \
895 if (handle_it_state () == FAIL) \
896 return; \
897 } \
898 while (0)
899
900 #define set_it_insn_type_nonvoid(type, failret) \
901 do \
902 { \
903 inst.it_insn_type = type; \
904 if (handle_it_state () == FAIL) \
905 return failret; \
906 } \
907 while(0)
908
909 #define set_it_insn_type_last() \
910 do \
911 { \
912 if (inst.cond == COND_ALWAYS) \
913 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
914 else \
915 set_it_insn_type (INSIDE_IT_LAST_INSN); \
916 } \
917 while (0)
918
/* Pure syntax. */

/* This array holds the chars that always start a comment. If the
   pre-processor is disabled, these aren't very useful. */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line. If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output. */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file. This is because the compiler outputs
   #NO_APP at the beginning of its output. */
/* Also note that comments like this one will always work. */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers. */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant. */
/* As in 0f12.456 */
/* or 0d1.2345e12 */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value. */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling. */

/* Skip at most ONE space: the input scrubber has already collapsed
   whitespace runs down to a single space.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
953
954 static inline int
955 skip_past_char (char ** str, char c)
956 {
957 /* PR gas/14987: Allow for whitespace before the expected character. */
958 skip_whitespace (*str);
959
960 if (**str == c)
961 {
962 (*str)++;
963 return SUCCESS;
964 }
965 else
966 return FAIL;
967 }
968
/* Consume an optional-whitespace-then-comma; SUCCESS or FAIL.  */
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols). */
972
973 /* Return TRUE if anything in the expression is a bignum. */
974
975 static int
976 walk_no_bignums (symbolS * sp)
977 {
978 if (symbol_get_value_expression (sp)->X_op == O_big)
979 return 1;
980
981 if (symbol_get_value_expression (sp)->X_add_symbol)
982 {
983 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
984 || (symbol_get_value_expression (sp)->X_op_symbol
985 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
986 }
987
988 return 0;
989 }
990
/* Non-zero while my_get_expression is active; lets md_operand know a
   bad operand came from inside an instruction parse.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression. */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
#define GE_OPT_PREFIX_BIG 3
1000
/* Parse an arithmetic expression at *STR into EP, honouring the
   immediate-prefix rules chosen by PREFIX_MODE (one of the GE_* values
   above).  On success return 0 and advance *STR past the expression;
   on failure set inst.error (unless already set) and return non-zero.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional. */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR by temporarily
     redirecting input_line_pointer.  in_my_get_expression tells
     md_operand to mark bad operands with O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand(). */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on. Big numbers are never valid
     in instructions, which is where this routine is always called. */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1086
1087 /* Turn a string in input_line_pointer into a floating point constant
1088 of type TYPE, and store the appropriate bytes in *LITP. The number
1089 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1090 returned, or NULL on OK.
1091
1092 Note that fp constants aren't represent in the normal way on the ARM.
1093 In big endian mode, things are as expected. However, in little endian
1094 mode fp constants are big-endian word-wise, and little-endian byte-wise
1095 within the words. For example, (double) 1.1 in big endian mode is
1096 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1097 the byte sequence 99 99 f1 3f 9a 99 99 99.
1098
1099 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1100
1101 const char *
1102 md_atof (int type, char * litP, int * sizeP)
1103 {
1104 int prec;
1105 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1106 char *t;
1107 int i;
1108
1109 switch (type)
1110 {
1111 case 'f':
1112 case 'F':
1113 case 's':
1114 case 'S':
1115 prec = 2;
1116 break;
1117
1118 case 'd':
1119 case 'D':
1120 case 'r':
1121 case 'R':
1122 prec = 4;
1123 break;
1124
1125 case 'x':
1126 case 'X':
1127 prec = 5;
1128 break;
1129
1130 case 'p':
1131 case 'P':
1132 prec = 5;
1133 break;
1134
1135 default:
1136 *sizeP = 0;
1137 return _("Unrecognized or unsupported floating point constant");
1138 }
1139
1140 t = atof_ieee (input_line_pointer, type, words);
1141 if (t)
1142 input_line_pointer = t;
1143 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1144
1145 if (target_big_endian)
1146 {
1147 for (i = 0; i < prec; i++)
1148 {
1149 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1150 litP += sizeof (LITTLENUM_TYPE);
1151 }
1152 }
1153 else
1154 {
1155 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1156 for (i = prec - 1; i >= 0; i--)
1157 {
1158 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1159 litP += sizeof (LITTLENUM_TYPE);
1160 }
1161 else
1162 /* For a 4 byte float the order of elements in `words' is 1 0.
1163 For an 8 byte float the order is 1 0 3 2. */
1164 for (i = 0; i < prec; i += 2)
1165 {
1166 md_number_to_chars (litP, (valueT) words[i + 1],
1167 sizeof (LITTLENUM_TYPE));
1168 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1169 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1170 litP += 2 * sizeof (LITTLENUM_TYPE);
1171 }
1172 }
1173
1174 return NULL;
1175 }
1176
1177 /* We handle all bad expressions here, so that we can report the faulty
1178 instruction in the error message. */
1179 void
1180 md_operand (expressionS * exp)
1181 {
1182 if (in_my_get_expression)
1183 exp->X_op = O_illegal;
1184 }
1185
/* Immediate values. */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number. */
#ifdef OBJ_ELF
static int
immediate_for_directive (int *val)
{
  expressionS exp;

  exp.X_op = O_illegal;
  if (is_immediate_prefix (*input_line_pointer))
    {
      ++input_line_pointer;
      expression (&exp);
    }

  if (exp.X_op == O_constant)
    {
      *val = exp.X_add_number;
      return SUCCESS;
    }

  as_bad (_("expected #constant"));
  ignore_rest_of_line ();
  return FAIL;
}
#endif
1214
1215 /* Register parsing. */
1216
1217 /* Generic register parser. CCP points to what should be the
1218 beginning of a register name. If it is indeed a valid register
1219 name, advance CCP over it and return the reg_entry structure;
1220 otherwise return NULL. Does not issue diagnostics. */
1221
1222 static struct reg_entry *
1223 arm_reg_parse_multi (char **ccp)
1224 {
1225 char *start = *ccp;
1226 char *p;
1227 struct reg_entry *reg;
1228
1229 skip_whitespace (start);
1230
1231 #ifdef REGISTER_PREFIX
1232 if (*start != REGISTER_PREFIX)
1233 return NULL;
1234 start++;
1235 #endif
1236 #ifdef OPTIONAL_REGISTER_PREFIX
1237 if (*start == OPTIONAL_REGISTER_PREFIX)
1238 start++;
1239 #endif
1240
1241 p = start;
1242 if (!ISALPHA (*p) || !is_name_beginner (*p))
1243 return NULL;
1244
1245 do
1246 p++;
1247 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1248
1249 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1250
1251 if (!reg)
1252 return NULL;
1253
1254 *ccp = p;
1255 return reg;
1256 }
1257
1258 static int
1259 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1260 enum arm_reg_type type)
1261 {
1262 /* Alternative syntaxes are accepted for a few register classes. */
1263 switch (type)
1264 {
1265 case REG_TYPE_MVF:
1266 case REG_TYPE_MVD:
1267 case REG_TYPE_MVFX:
1268 case REG_TYPE_MVDX:
1269 /* Generic coprocessor register names are allowed for these. */
1270 if (reg && reg->type == REG_TYPE_CN)
1271 return reg->number;
1272 break;
1273
1274 case REG_TYPE_CP:
1275 /* For backward compatibility, a bare number is valid here. */
1276 {
1277 unsigned long processor = strtoul (start, ccp, 10);
1278 if (*ccp != start && processor <= 15)
1279 return processor;
1280 }
1281 /* Fall through. */
1282
1283 case REG_TYPE_MMXWC:
1284 /* WC includes WCG. ??? I'm not sure this is true for all
1285 instructions that take WC registers. */
1286 if (reg && reg->type == REG_TYPE_MMXWCG)
1287 return reg->number;
1288 break;
1289
1290 default:
1291 break;
1292 }
1293
1294 return FAIL;
1295 }
1296
1297 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1298 return value is the register number or FAIL. */
1299
1300 static int
1301 arm_reg_parse (char **ccp, enum arm_reg_type type)
1302 {
1303 char *start = *ccp;
1304 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1305 int ret;
1306
1307 /* Do not allow a scalar (reg+index) to parse as a register. */
1308 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1309 return FAIL;
1310
1311 if (reg && reg->type == type)
1312 return reg->number;
1313
1314 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1315 return ret;
1316
1317 *ccp = start;
1318 return FAIL;
1319 }
1320
1321 /* Parse a Neon type specifier. *STR should point at the leading '.'
1322 character. Does no verification at this stage that the type fits the opcode
1323 properly. E.g.,
1324
1325 .i32.i32.s16
1326 .s32.f32
1327 .u16
1328
1329 Can all be legally parsed by this function.
1330
1331 Fills in neon_type struct pointer with parsed information, and updates STR
1332 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1333 type, FAIL if not. */
1334
1335 static int
1336 parse_neon_type (struct neon_type *type, char **str)
1337 {
1338 char *ptr = *str;
1339
1340 if (type)
1341 type->elems = 0;
1342
1343 while (type->elems < NEON_MAX_TYPE_ELS)
1344 {
1345 enum neon_el_type thistype = NT_untyped;
1346 unsigned thissize = -1u;
1347
1348 if (*ptr != '.')
1349 break;
1350
1351 ptr++;
1352
1353 /* Just a size without an explicit type. */
1354 if (ISDIGIT (*ptr))
1355 goto parsesize;
1356
1357 switch (TOLOWER (*ptr))
1358 {
1359 case 'i': thistype = NT_integer; break;
1360 case 'f': thistype = NT_float; break;
1361 case 'p': thistype = NT_poly; break;
1362 case 's': thistype = NT_signed; break;
1363 case 'u': thistype = NT_unsigned; break;
1364 case 'd':
1365 thistype = NT_float;
1366 thissize = 64;
1367 ptr++;
1368 goto done;
1369 default:
1370 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1371 return FAIL;
1372 }
1373
1374 ptr++;
1375
1376 /* .f is an abbreviation for .f32. */
1377 if (thistype == NT_float && !ISDIGIT (*ptr))
1378 thissize = 32;
1379 else
1380 {
1381 parsesize:
1382 thissize = strtoul (ptr, &ptr, 10);
1383
1384 if (thissize != 8 && thissize != 16 && thissize != 32
1385 && thissize != 64)
1386 {
1387 as_bad (_("bad size %d in type specifier"), thissize);
1388 return FAIL;
1389 }
1390 }
1391
1392 done:
1393 if (type)
1394 {
1395 type->el[type->elems].type = thistype;
1396 type->el[type->elems].size = thissize;
1397 type->elems++;
1398 }
1399 }
1400
1401 /* Empty/missing type is not a successful parse. */
1402 if (type->elems == 0)
1403 return FAIL;
1404
1405 *str = ptr;
1406
1407 return SUCCESS;
1408 }
1409
1410 /* Errors may be set multiple times during parsing or bit encoding
1411 (particularly in the Neon bits), but usually the earliest error which is set
1412 will be the most meaningful. Avoid overwriting it with later (cascading)
1413 errors by calling this function. */
1414
1415 static void
1416 first_error (const char *err)
1417 {
1418 if (!inst.error)
1419 inst.error = err;
1420 }
1421
1422 /* Parse a single type, e.g. ".s32", leading period included. */
1423 static int
1424 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1425 {
1426 char *str = *ccp;
1427 struct neon_type optype;
1428
1429 if (*str == '.')
1430 {
1431 if (parse_neon_type (&optype, &str) == SUCCESS)
1432 {
1433 if (optype.elems == 1)
1434 *vectype = optype.el[0];
1435 else
1436 {
1437 first_error (_("only one type should be specified for operand"));
1438 return FAIL;
1439 }
1440 }
1441 else
1442 {
1443 first_error (_("vector type expected"));
1444 return FAIL;
1445 }
1446 }
1447 else
1448 return FAIL;
1449
1450 *ccp = str;
1451
1452 return SUCCESS;
1453 }
1454
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer. */

#define NEON_ALL_LANES 15
#define NEON_INTERLEAVE_LANES 14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO. */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "nothing defined": no index, no element type.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index attached to the register by a .dn/.qn alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* Optional ".type" suffix, e.g. "d0.s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix; only valid on D registers.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1569
1570 /* Like arm_reg_parse, but allow allow the following extra features:
1571 - If RTYPE is non-zero, return the (possibly restricted) type of the
1572 register (e.g. Neon double or quad reg when either has been requested).
1573 - If this is a Neon vector type with additional type information, fill
1574 in the struct pointed to by VECTYPE (if non-NULL).
1575 This function will fault on encountering a scalar. */
1576
1577 static int
1578 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1579 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1580 {
1581 struct neon_typed_alias atype;
1582 char *str = *ccp;
1583 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1584
1585 if (reg == FAIL)
1586 return FAIL;
1587
1588 /* Do not allow regname(... to parse as a register. */
1589 if (*str == '(')
1590 return FAIL;
1591
1592 /* Do not allow a scalar (reg+index) to parse as a register. */
1593 if ((atype.defined & NTA_HASINDEX) != 0)
1594 {
1595 first_error (_("register operand expected, but got scalar"));
1596 return FAIL;
1597 }
1598
1599 if (vectype)
1600 *vectype = atype.eltype;
1601
1602 *ccp = str;
1603
1604 return reg;
1605 }
1606
/* A parsed scalar is encoded as (register << 4) | index; these unpack it. */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1609
1610 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1611 have enough information to be able to do a good job bounds-checking. So, we
1612 just do easy checks here, and do further checks later. */
1613
1614 static int
1615 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1616 {
1617 int reg;
1618 char *str = *ccp;
1619 struct neon_typed_alias atype;
1620
1621 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1622
1623 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1624 return FAIL;
1625
1626 if (atype.index == NEON_ALL_LANES)
1627 {
1628 first_error (_("scalar must have an index"));
1629 return FAIL;
1630 }
1631 else if (atype.index >= 64 / elsize)
1632 {
1633 first_error (_("scalar index out of range"));
1634 return FAIL;
1635 }
1636
1637 if (type)
1638 *type = atype.eltype;
1639
1640 *ccp = str;
1641
1642 return reg * 16 + atype.index;
1643 }
1644
1645 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1646
1647 static long
1648 parse_reg_list (char ** strp)
1649 {
1650 char * str = * strp;
1651 long range = 0;
1652 int another_range;
1653
1654 /* We come back here if we get ranges concatenated by '+' or '|'. */
1655 do
1656 {
1657 skip_whitespace (str);
1658
1659 another_range = 0;
1660
1661 if (*str == '{')
1662 {
1663 int in_range = 0;
1664 int cur_reg = -1;
1665
1666 str++;
1667 do
1668 {
1669 int reg;
1670
1671 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1672 {
1673 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1674 return FAIL;
1675 }
1676
1677 if (in_range)
1678 {
1679 int i;
1680
1681 if (reg <= cur_reg)
1682 {
1683 first_error (_("bad range in register list"));
1684 return FAIL;
1685 }
1686
1687 for (i = cur_reg + 1; i < reg; i++)
1688 {
1689 if (range & (1 << i))
1690 as_tsktsk
1691 (_("Warning: duplicated register (r%d) in register list"),
1692 i);
1693 else
1694 range |= 1 << i;
1695 }
1696 in_range = 0;
1697 }
1698
1699 if (range & (1 << reg))
1700 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1701 reg);
1702 else if (reg <= cur_reg)
1703 as_tsktsk (_("Warning: register range not in ascending order"));
1704
1705 range |= 1 << reg;
1706 cur_reg = reg;
1707 }
1708 while (skip_past_comma (&str) != FAIL
1709 || (in_range = 1, *str++ == '-'));
1710 str--;
1711
1712 if (skip_past_char (&str, '}') == FAIL)
1713 {
1714 first_error (_("missing `}'"));
1715 return FAIL;
1716 }
1717 }
1718 else
1719 {
1720 expressionS exp;
1721
1722 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1723 return FAIL;
1724
1725 if (exp.X_op == O_constant)
1726 {
1727 if (exp.X_add_number
1728 != (exp.X_add_number & 0x0000ffff))
1729 {
1730 inst.error = _("invalid register mask");
1731 return FAIL;
1732 }
1733
1734 if ((range & exp.X_add_number) != 0)
1735 {
1736 int regno = range & exp.X_add_number;
1737
1738 regno &= -regno;
1739 regno = (1 << regno) - 1;
1740 as_tsktsk
1741 (_("Warning: duplicated register (r%d) in register list"),
1742 regno);
1743 }
1744
1745 range |= exp.X_add_number;
1746 }
1747 else
1748 {
1749 if (inst.reloc.type != 0)
1750 {
1751 inst.error = _("expression too complex");
1752 return FAIL;
1753 }
1754
1755 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1756 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1757 inst.reloc.pc_rel = 0;
1758 }
1759 }
1760
1761 if (*str == '|' || *str == '+')
1762 {
1763 str++;
1764 another_range = 1;
1765 }
1766 }
1767 while (another_range);
1768
1769 *strp = str;
1770 return range;
1771 }
1772
/* Types of registers in a list. */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* Neon D registers (Q names allowed).  */
};

/* Parse a VFP register list. If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register. Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug. */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that D16-D31 were actually used, for attribute output.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>. */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;	/* NOTE(review): this consumes the terminator without
		   checking that it is actually '}' — presumably a
		   following operand parse catches garbage; confirm.  */

  /* Sanity check -- should have raised a parse error above. */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive. */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1960
1961 /* True if two alias types are the same. */
1962
1963 static bfd_boolean
1964 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1965 {
1966 if (!a && !b)
1967 return TRUE;
1968
1969 if (!a || !b)
1970 return FALSE;
1971
1972 if (a->defined != b->defined)
1973 return FALSE;
1974
1975 if ((a->defined & NTA_HASTYPE) != 0
1976 && (a->eltype.type != b->eltype.type
1977 || a->eltype.size != b->eltype.size))
1978 return FALSE;
1979
1980 if ((a->defined & NTA_HASINDEX) != 0
1981 && (a->index != b->index))
1982 return FALSE;
1983
1984 return TRUE;
1985 }
1986
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL. */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list (D numbering).  */
  int reg_incr = -1;	/* Stride between registers; -1 = not yet known.  */
  int count = 0;	/* Number of D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: it fixes the base and the expected types.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: it determines the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check. */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2155
2156 /* Parse an explicit relocation suffix on an expression. This is
2157 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2158 arm_reloc_hsh contains no entries, so this function can only
2159 succeed if there is no () after the word. Returns -1 on error,
2160 BFD_RELOC_UNUSED if there wasn't any suffix. */
2161
2162 static int
2163 parse_reloc (char **str)
2164 {
2165 struct reloc_entry *r;
2166 char *p, *q;
2167
2168 if (**str != '(')
2169 return BFD_RELOC_UNUSED;
2170
2171 p = *str + 1;
2172 q = p;
2173
2174 while (*q && *q != ')' && *q != ',')
2175 q++;
2176 if (*q != ')')
2177 return -1;
2178
2179 if ((r = (struct reloc_entry *)
2180 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2181 return -1;
2182
2183 *str = q + 1;
2184 return r->reloc;
2185 }
2186
2187 /* Directives: register aliases. */
2188
2189 static struct reg_entry *
2190 insert_reg_alias (char *str, unsigned number, int type)
2191 {
2192 struct reg_entry *new_reg;
2193 const char *name;
2194
2195 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2196 {
2197 if (new_reg->builtin)
2198 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2199
2200 /* Only warn about a redefinition if it's not defined as the
2201 same register. */
2202 else if (new_reg->number != number || new_reg->type != type)
2203 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2204
2205 return NULL;
2206 }
2207
2208 name = xstrdup (str);
2209 new_reg = XNEW (struct reg_entry);
2210
2211 new_reg->name = name;
2212 new_reg->number = number;
2213 new_reg->type = type;
2214 new_reg->builtin = FALSE;
2215 new_reg->neon = NULL;
2216
2217 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2218 abort ();
2219
2220 return new_reg;
2221 }
2222
2223 static void
2224 insert_neon_reg_alias (char *str, int number, int type,
2225 struct neon_typed_alias *atype)
2226 {
2227 struct reg_entry *reg = insert_reg_alias (str, number, type);
2228
2229 if (!reg)
2230 {
2231 first_error (_("attempt to redefine typed alias"));
2232 return;
2233 }
2234
2235 if (atype)
2236 {
2237 reg->neon = XNEW (struct neon_typed_alias);
2238 *reg->neon = *atype;
2239 }
2240 }
2241
/* Look for the .req directive.	 This is of the form:

      new_register_name .req existing_register_name

   NEWNAME points at the alias name; P points just past it, at the
   text that should be " .req <reg>".  If we find a .req, or if it
   looks sufficiently like one that we want to handle any error here,
   return TRUE.	 Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  Note that P is reused below as a scratch pointer into NBUF;
     insert_reg_alias copies the name, so NBUF may be modified freely
     between (and freed after) the insertions.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the uppercase insertion when it would be identical to the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.	 We will fail and issue
	     a second, duplicate error message.	 This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2321
/* Create a Neon typed/indexed register alias using directives, e.g.:
    X .dn d5.s32[1]
    Y .qn 6.s16
    Z .dn d7
    T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
    vadd d0.s32, d1.s32, d2.s32

   NEWNAME points at the alias name, P just past it.  Returns TRUE if
   the text was recognized as a .dn/.qn directive (even when an error
   was reported), FALSE if it is not one at all.  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* MYBASEREG is a stack temporary; only its number/neon fields are
	 read below.  A Q register number is expressed in D-register
	 units, hence the doubling.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* An alias of an already-typed alias inherits its type/index.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.	 */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.	 */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As for .req: create the alias as written, plus all-uppercase and
     all-lowercase variants when they differ.  P is reused as a scratch
     pointer into NAMEBUF from here on.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2470
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* ".req" in directive position is always a syntax error; the valid
     form is handled by create_register_alias from the label parser.  */
  as_bad (_("invalid syntax for .req directive"));
}
2479
/* Likewise for .dn: the alias name belongs on the left of the
   directive, so reaching here is a syntax error (see
   create_neon_reg_alias).  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2485
/* Likewise for .qn: the alias name belongs on the left of the
   directive, so reaching here is a syntax error (see
   create_neon_reg_alias).  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2491
2492 /* The .unreq directive deletes an alias which was previously defined
2493 by .req. For example:
2494
2495 my_alias .req r11
2496 .unreq my_alias */
2497
2498 static void
2499 s_unreq (int a ATTRIBUTE_UNUSED)
2500 {
2501 char * name;
2502 char saved_char;
2503
2504 name = input_line_pointer;
2505
2506 while (*input_line_pointer != 0
2507 && *input_line_pointer != ' '
2508 && *input_line_pointer != '\n')
2509 ++input_line_pointer;
2510
2511 saved_char = *input_line_pointer;
2512 *input_line_pointer = 0;
2513
2514 if (!*name)
2515 as_bad (_("invalid syntax for .unreq directive"));
2516 else
2517 {
2518 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2519 name);
2520
2521 if (!reg)
2522 as_bad (_("unknown register alias '%s'"), name);
2523 else if (reg->builtin)
2524 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2525 name);
2526 else
2527 {
2528 char * p;
2529 char * nbuf;
2530
2531 hash_delete (arm_reg_hsh, name, FALSE);
2532 free ((char *) reg->name);
2533 if (reg->neon)
2534 free (reg->neon);
2535 free (reg);
2536
2537 /* Also locate the all upper case and all lower case versions.
2538 Do not complain if we cannot find one or the other as it
2539 was probably deleted above. */
2540
2541 nbuf = strdup (name);
2542 for (p = nbuf; *p; p++)
2543 *p = TOUPPER (*p);
2544 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2545 if (reg)
2546 {
2547 hash_delete (arm_reg_hsh, nbuf, FALSE);
2548 free ((char *) reg->name);
2549 if (reg->neon)
2550 free (reg->neon);
2551 free (reg);
2552 }
2553
2554 for (p = nbuf; *p; p++)
2555 *p = TOLOWER (*p);
2556 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2557 if (reg)
2558 {
2559 hash_delete (arm_reg_hsh, nbuf, FALSE);
2560 free ((char *) reg->name);
2561 if (reg->neon)
2562 free (reg->neon);
2563 free (reg);
2564 }
2565
2566 free (nbuf);
2567 }
2568 }
2569
2570 *input_line_pointer = saved_char;
2571 demand_empty_rest_of_line ();
2572 }
2573
2574 /* Directives: Instruction set selection. */
2575
2576 #ifdef OBJ_ELF
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
2581
/* Create a new mapping symbol ($a, $t or $d, according to STATE) at
   offset VALUE within FRAG, and record it in the frag's first_map /
   last_map bookkeeping so overlapping symbols can be cleaned up.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  /* Mapping symbols are local-only; TYPE is currently always
     BSF_NO_FLAGS (see comment above: all three are untyped now).  */
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Tag the symbol with the instruction-set state it marks, so
     interworking machinery sees consistent information.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2657
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emits $d at offset VALUE within FRAG and a STATE ($a/$t) symbol at
   VALUE + BYTES, after removing any mapping symbol already sitting at
   VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.	*/
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the frag's first map;
	     clear that slot too so make_mapping_symbol can refill it.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2684
2685 static void mapping_state_2 (enum mstate state, int max_chars);
2686
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2723
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into normal (loadable) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Anything already emitted before this first instruction must
	 have been data; mark it with $d at the start of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2753 #undef TRANSITION
2754 #else
2755 #define mapping_state(x) ((void)0)
2756 #define mapping_state_2(x, y) ((void)0)
2757 #endif
2758
2759 /* Find the real, Thumb encoded start of a Thumb function. */
2760
2761 #ifdef OBJ_COFF
2762 static symbolS *
2763 find_real_start (symbolS * symbolP)
2764 {
2765 char * real_start;
2766 const char * name = S_GET_NAME (symbolP);
2767 symbolS * new_target;
2768
2769 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2770 #define STUB_NAME ".real_start_of"
2771
2772 if (name == NULL)
2773 abort ();
2774
2775 /* The compiler may generate BL instructions to local labels because
2776 it needs to perform a branch to a far away location. These labels
2777 do not have a corresponding ".real_start_of" label. We check
2778 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2779 the ".real_start_of" convention for nonlocal branches. */
2780 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2781 return symbolP;
2782
2783 real_start = concat (STUB_NAME, name, NULL);
2784 new_target = symbol_find (real_start);
2785 free (real_start);
2786
2787 if (new_target == NULL)
2788 {
2789 as_warn (_("Failed to find real start of function: %s\n"), name);
2790 new_target = symbolP;
2791 }
2792
2793 return new_target;
2794 }
2795 #endif
2796
2797 static void
2798 opcode_select (int width)
2799 {
2800 switch (width)
2801 {
2802 case 16:
2803 if (! thumb_mode)
2804 {
2805 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2806 as_bad (_("selected processor does not support THUMB opcodes"));
2807
2808 thumb_mode = 1;
2809 /* No need to force the alignment, since we will have been
2810 coming from ARM mode, which is word-aligned. */
2811 record_alignment (now_seg, 1);
2812 }
2813 break;
2814
2815 case 32:
2816 if (thumb_mode)
2817 {
2818 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2819 as_bad (_("selected processor does not support ARM opcodes"));
2820
2821 thumb_mode = 0;
2822
2823 if (!need_pass_2)
2824 frag_align (2, 0, 0);
2825
2826 record_alignment (now_seg, 1);
2827 }
2828 break;
2829
2830 default:
2831 as_bad (_("invalid instruction size selected (%d)"), width);
2832 }
2833 }
2834
/* Directive: .arm — switch to 32-bit ARM instruction encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2841
/* Directive: .thumb — switch to 16-bit Thumb instruction encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2848
2849 static void
2850 s_code (int unused ATTRIBUTE_UNUSED)
2851 {
2852 int temp;
2853
2854 temp = get_absolute_expression ();
2855 switch (temp)
2856 {
2857 case 16:
2858 case 32:
2859 opcode_select (temp);
2860 break;
2861
2862 default:
2863 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2864 }
2865 }
2866
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.	*/
  if (! thumb_mode)
    {
      /* NOTE(review): 2 rather than 1 — presumably distinguishes
	 "forced" Thumb mode from .thumb's value of 1; confirm against
	 the other users of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2883
/* Directive: .thumb_func — switch to Thumb encoding and flag the next
   label as the start of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
2893
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  EQUIV is non-zero for .thumb_set acting
   like .equiv (error on redefinition) rather than .set.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Re-terminate the name just for the error message, then put the
	 delimiter back before skipping the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Undo the temporary NUL-termination of the name.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2982
2983 /* Directives: Mode selection. */
2984
2985 /* .syntax [unified|divided] - choose the new unified syntax
2986 (same for Arm and Thumb encoding, modulo slight differences in what
2987 can be represented) or the old divergent syntax for each mode. */
2988 static void
2989 s_syntax (int unused ATTRIBUTE_UNUSED)
2990 {
2991 char *name, delim;
2992
2993 delim = get_symbol_name (& name);
2994
2995 if (!strcasecmp (name, "unified"))
2996 unified_syntax = TRUE;
2997 else if (!strcasecmp (name, "divided"))
2998 unified_syntax = FALSE;
2999 else
3000 {
3001 as_bad (_("unrecognized syntax mode \"%s\""), name);
3002 return;
3003 }
3004 (void) restore_line_pointer (delim);
3005 demand_empty_rest_of_line ();
3006 }
3007
3008 /* Directives: sectioning and alignment. */
3009
/* Directive: .bss — switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3022
/* Directive: .even — align the current location to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3034
3035 /* Directives: CodeComposer Studio. */
3036
/* .ref (for CodeComposer Studio syntax only).  Accepted and ignored
   when -mccs is in effect; an error otherwise.  */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3046
3047 /* If name is not NULL, then it is used for marking the beginning of a
3048 function, whereas if it is NULL then it means the function end. */
3049 static void
3050 asmfunc_debug (const char * name)
3051 {
3052 static const char * last_name = NULL;
3053
3054 if (name != NULL)
3055 {
3056 gas_assert (last_name == NULL);
3057 last_name = name;
3058
3059 if (debug_type == DEBUG_STABS)
3060 stabs_generate_asm_func (name, name);
3061 }
3062 else
3063 {
3064 gas_assert (last_name != NULL);
3065
3066 if (debug_type == DEBUG_STABS)
3067 stabs_generate_asm_endfunc (last_name, last_name);
3068
3069 last_name = NULL;
3070 }
3071 }
3072
3073 static void
3074 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3075 {
3076 if (codecomposer_syntax)
3077 {
3078 switch (asmfunc_state)
3079 {
3080 case OUTSIDE_ASMFUNC:
3081 asmfunc_state = WAITING_ASMFUNC_NAME;
3082 break;
3083
3084 case WAITING_ASMFUNC_NAME:
3085 as_bad (_(".asmfunc repeated."));
3086 break;
3087
3088 case WAITING_ENDASMFUNC:
3089 as_bad (_(".asmfunc without function."));
3090 break;
3091 }
3092 demand_empty_rest_of_line ();
3093 }
3094 else
3095 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3096 }
3097
3098 static void
3099 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3100 {
3101 if (codecomposer_syntax)
3102 {
3103 switch (asmfunc_state)
3104 {
3105 case OUTSIDE_ASMFUNC:
3106 as_bad (_(".endasmfunc without a .asmfunc."));
3107 break;
3108
3109 case WAITING_ASMFUNC_NAME:
3110 as_bad (_(".endasmfunc without function."));
3111 break;
3112
3113 case WAITING_ENDASMFUNC:
3114 asmfunc_state = OUTSIDE_ASMFUNC;
3115 asmfunc_debug (NULL);
3116 break;
3117 }
3118 demand_empty_rest_of_line ();
3119 }
3120 else
3121 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3122 }
3123
/* .def (CodeComposer Studio syntax): treated as .global when -mccs is
   in effect; an error otherwise.  */
static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3132
3133 /* Directives: Literal pools. */
3134
3135 static literal_pool *
3136 find_literal_pool (void)
3137 {
3138 literal_pool * pool;
3139
3140 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3141 {
3142 if (pool->section == now_seg
3143 && pool->sub_section == now_subseg)
3144 break;
3145 }
3146
3147 return pool;
3148 }
3149
3150 static literal_pool *
3151 find_or_make_literal_pool (void)
3152 {
3153 /* Next literal pool ID number. */
3154 static unsigned int latest_pool_num = 1;
3155 literal_pool * pool;
3156
3157 pool = find_literal_pool ();
3158
3159 if (pool == NULL)
3160 {
3161 /* Create a new pool. */
3162 pool = XNEW (literal_pool);
3163 if (! pool)
3164 return NULL;
3165
3166 pool->next_free_entry = 0;
3167 pool->section = now_seg;
3168 pool->sub_section = now_subseg;
3169 pool->next = list_of_pools;
3170 pool->symbol = NULL;
3171 pool->alignment = 2;
3172
3173 /* Add it to the list. */
3174 list_of_pools = pool;
3175 }
3176
3177 /* New pools, and emptied pools, will have a NULL symbol. */
3178 if (pool->symbol == NULL)
3179 {
3180 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3181 (valueT) 0, &zero_address_frag);
3182 pool->id = latest_pool_num ++;
3183 }
3184
3185 /* Done. */
3186 return pool;
3187 }
3188
/* Add the literal in the global 'inst' structure to the relevant
   literal pool.  NBYTES is the size of the literal (4 or 8).  On
   success, rewrites inst.reloc.exp into an O_symbol expression
   referencing the pool symbol at the entry's byte offset and returns
   SUCCESS; returns FAIL (with inst.error set) on overflow or a
   non-constant 8-byte literal.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, low word in
	 IMM1 and high word in IMM2 (swapped for big-endian output).  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Reusable match: identical constant of the same size...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ...or an identical symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 holding its two halves at an 8-byte-aligned offset.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot left by a previous 8-byte alignment can be
	 reclaimed by a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte padding slot (tagged via X_md) to reach
		 8-byte alignment before the pair of halves.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the reclaimed padding slot with the real literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Point the instruction's relocation at the pool entry: the pool
     symbol plus the entry's byte offset.  */
  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3353
3354 bfd_boolean
3355 tc_start_label_without_colon (void)
3356 {
3357 bfd_boolean ret = TRUE;
3358
3359 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3360 {
3361 const char *label = input_line_pointer;
3362
3363 while (!is_end_of_line[(int) label[-1]])
3364 --label;
3365
3366 if (*label == '.')
3367 {
3368 as_bad (_("Invalid label '%s'"), label);
3369 ret = FALSE;
3370 }
3371
3372 asmfunc_debug (label);
3373
3374 asmfunc_state = WAITING_ENDASMFUNC;
3375 }
3376
3377 return ret;
3378 }
3379
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

/* Give SYMBOLP its name, segment, value and owning fragment, and link
   it onto the end of the global symbol chain.  NAME is copied into the
   notes obstack, so the caller's buffer may be reused afterwards.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into stable storage owned by the notes obstack.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object format and target add their own decoration.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3430
/* Implement the .ltorg / .pool directive: dump the pending literal
   pool for the current section at the present location, give the pool
   its deferred symbol a real value, and mark the pool empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool, or it was never referenced.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data needs a $d mapping symbol for disassemblers.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte makes the name impossible to type in source.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Now the pool's location is known, bind the deferred symbol.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3482
3483 #ifdef OBJ_ELF
3484 /* Forward declarations for functions below, in the MD interface
3485 section. */
3486 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3487 static valueT create_unwind_entry (int);
3488 static void start_unwind_section (const segT, int);
3489 static void add_unwind_opcode (valueT, int);
3490 static void flush_pending_unwind (void);
3491
3492 /* Directives: Data. */
3493
/* Implement .word/.long (NBYTES == 4): like the generic cons, but each
   comma-separated operand may carry an ARM relocation suffix such as
   (got) or (plt), which is turned into an explicit fixup.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      /* Only a symbolic operand can carry a relocation suffix.  */
      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the relocation suffix out of the input buffer
		     so the re-parse sees "symbol + rest" contiguously,
		     then restore the buffer afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup in the low-order bytes of the field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3586
3587 /* Emit an expression containing a 32-bit thumb instruction.
3588 Implementation based on put_thumb32_insn. */
3589
3590 static void
3591 emit_thumb32_expr (expressionS * exp)
3592 {
3593 expressionS exp_high = *exp;
3594
3595 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3596 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3597 exp->X_add_number &= 0xffff;
3598 emit_expr (exp, (unsigned int) THUMB_SIZE);
3599 }
3600
/* Guess the size in bytes of a Thumb instruction from its opcode value:
   2 for a 16-bit encoding, 4 for a 32-bit encoding, and 0 when the
   value falls in neither range so the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int pattern = (unsigned int) opcode;

  if (pattern < 0xe800u)
    return 2;
  if (pattern >= 0xe8000000u)
    return 4;
  return 0;
}
3613
/* Emit a constant expression EXP as a raw instruction of NBYTES bytes
   (the .inst family).  NBYTES == 0 means infer the size from the
   opcode value in Thumb mode.  Returns TRUE if something was emitted,
   FALSE on error (a diagnostic has already been issued).  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* .inst with no width suffix: guess from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* .inst.n cannot hold a value needing a 32-bit encoding.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent with this
		 hand-assembled instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian Thumb-2 must be emitted halfword-swapped.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3658
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  */

/* Implement .inst/.inst.n/.inst.w.  NBYTES is 2 (.inst.n), 4 (.inst.w)
   or 0 (.inst, size inferred).  Width suffixes are Thumb-only; ARM
   instructions are always 4 bytes.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* In ARM mode only the plain .inst form is accepted.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3708
/* Parse a .rel31 directive.  Emits a 4-byte word whose top bit comes
   from the leading "0" or "1" operand and whose low 31 bits get an
   R_ARM_PREL31 relocation against the following expression.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the high bit of the emitted word.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  /* pc_rel == 1: the 31-bit field is a self-relative offset.  */
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3748
3749 /* Directives: AEABI stack-unwind tables. */
3750
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets the per-function unwind state.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Nested or repeated .fnstart is an error.  */
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1: no personality chosen yet.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3777
3778
3779 /* Parse a handlerdata directive. Creates the exception handling table entry
3780 for the function. */
3781
3782 static void
3783 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3784 {
3785 demand_empty_rest_of_line ();
3786 if (!unwind.proc_start)
3787 as_bad (MISSING_FNSTART);
3788
3789 if (unwind.table_entry)
3790 as_bad (_("duplicate .handlerdata directive"));
3791
3792 create_unwind_entry (1);
3793 }
3794
/* Parse an unwind_fnend directive.  Generates the index table entry
   (two words: function offset, then either an inline entry or a
   pointer to the table entry) and closes out the function's state.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size BFD_RELOC_NONE fixup creates the reference
	 without modifying any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3864
3865
3866 /* Parse an unwind_cantunwind directive. */
3867
3868 static void
3869 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3870 {
3871 demand_empty_rest_of_line ();
3872 if (!unwind.proc_start)
3873 as_bad (MISSING_FNSTART);
3874
3875 if (unwind.personality_routine || unwind.personality_index != -1)
3876 as_bad (_("personality routine specified for cantunwind frame"));
3877
3878 unwind.personality_index = -2;
3879 }
3880
3881
/* Parse a personalityindex directive.  Selects one of the 16 possible
   predefined personality routines by number.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality may be given per function.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index field is 4 bits wide.  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3909
3910
/* Parse a personality directive.  Records a user-supplied personality
   routine by symbol name.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality may be given per function.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name terminates NAME in place; restore the clobbered
     character at *p once the symbol has been interned.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3932
3933
/* Parse a directive saving core registers.  Emits EHABI pop opcodes
   (short forms where possible) and accounts the pushed bytes in the
   frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;	/* Bitmask of saved core registers, bit N == rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;	/* Swap ip for sp.  */
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4009
4010
/* Parse a directive saving FPA registers.  REG is the first register
   saved; a ", <count>" operand (1..4) gives the number of consecutive
   registers.  Each FPA register occupies 12 bytes of frame.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += num_regs * 12;
}
4058
4059
/* Parse a directive saving VFP registers for ARMv6 and above (.vsave).
   Registers d16-d31 (VFPv3) and d0-d15 need separate opcodes, so a
   list spanning d15/d16 is split in two.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The 0xc8xx opcode encodes the start register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4108
4109
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX-style
   save, which stores an extra word of padding — hence the + 4).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += count * 8 + 4;
}
4143
4144
4145 /* Parse a directive saving iWMMXt data registers. */
4146
4147 static void
4148 s_arm_unwind_save_mmxwr (void)
4149 {
4150 int reg;
4151 int hi_reg;
4152 int i;
4153 unsigned mask = 0;
4154 valueT op;
4155
4156 if (*input_line_pointer == '{')
4157 input_line_pointer++;
4158
4159 do
4160 {
4161 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4162
4163 if (reg == FAIL)
4164 {
4165 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4166 goto error;
4167 }
4168
4169 if (mask >> reg)
4170 as_tsktsk (_("register list not in ascending order"));
4171 mask |= 1 << reg;
4172
4173 if (*input_line_pointer == '-')
4174 {
4175 input_line_pointer++;
4176 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4177 if (hi_reg == FAIL)
4178 {
4179 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4180 goto error;
4181 }
4182 else if (reg >= hi_reg)
4183 {
4184 as_bad (_("bad register range"));
4185 goto error;
4186 }
4187 for (; reg < hi_reg; reg++)
4188 mask |= 1 << reg;
4189 }
4190 }
4191 while (skip_past_comma (&input_line_pointer) != FAIL);
4192
4193 skip_past_char (&input_line_pointer, '}');
4194
4195 demand_empty_rest_of_line ();
4196
4197 /* Generate any deferred opcodes because we're going to be looking at
4198 the list. */
4199 flush_pending_unwind ();
4200
4201 for (i = 0; i < 16; i++)
4202 {
4203 if (mask & (1 << i))
4204 unwind.frame_size += 8;
4205 }
4206
4207 /* Attempt to combine with a previous opcode. We do this because gcc
4208 likes to output separate unwind directives for a single block of
4209 registers. */
4210 if (unwind.opcode_count > 0)
4211 {
4212 i = unwind.opcodes[unwind.opcode_count - 1];
4213 if ((i & 0xf8) == 0xc0)
4214 {
4215 i &= 7;
4216 /* Only merge if the blocks are contiguous. */
4217 if (i < 6)
4218 {
4219 if ((mask & 0xfe00) == (1 << 9))
4220 {
4221 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4222 unwind.opcode_count--;
4223 }
4224 }
4225 else if (i == 6 && unwind.opcode_count >= 2)
4226 {
4227 i = unwind.opcodes[unwind.opcode_count - 2];
4228 reg = i >> 4;
4229 i &= 0xf;
4230
4231 op = 0xffff << (reg - 1);
4232 if (reg > 0
4233 && ((mask & op) == (1u << (reg - 1))))
4234 {
4235 op = (1 << (reg + i + 1)) - 1;
4236 op &= ~((1 << reg) - 1);
4237 mask |= op;
4238 unwind.opcode_count -= 2;
4239 }
4240 }
4241 }
4242 }
4243
4244 hi_reg = 15;
4245 /* We want to generate opcodes in the order the registers have been
4246 saved, ie. descending order. */
4247 for (reg = 15; reg >= -1; reg--)
4248 {
4249 /* Save registers in blocks. */
4250 if (reg < 0
4251 || !(mask & (1 << reg)))
4252 {
4253 /* We found an unsaved reg. Generate opcodes to save the
4254 preceding block. */
4255 if (reg != hi_reg)
4256 {
4257 if (reg == 9)
4258 {
4259 /* Short form. */
4260 op = 0xc0 | (hi_reg - 10);
4261 add_unwind_opcode (op, 1);
4262 }
4263 else
4264 {
4265 /* Long form. */
4266 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4267 add_unwind_opcode (op, 2);
4268 }
4269 }
4270 hi_reg = reg - 1;
4271 }
4272 }
4273
4274 return;
4275 error:
4276 ignore_rest_of_line ();
4277 }
4278
4279 static void
4280 s_arm_unwind_save_mmxwcg (void)
4281 {
4282 int reg;
4283 int hi_reg;
4284 unsigned mask = 0;
4285 valueT op;
4286
4287 if (*input_line_pointer == '{')
4288 input_line_pointer++;
4289
4290 skip_whitespace (input_line_pointer);
4291
4292 do
4293 {
4294 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4295
4296 if (reg == FAIL)
4297 {
4298 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4299 goto error;
4300 }
4301
4302 reg -= 8;
4303 if (mask >> reg)
4304 as_tsktsk (_("register list not in ascending order"));
4305 mask |= 1 << reg;
4306
4307 if (*input_line_pointer == '-')
4308 {
4309 input_line_pointer++;
4310 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4311 if (hi_reg == FAIL)
4312 {
4313 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4314 goto error;
4315 }
4316 else if (reg >= hi_reg)
4317 {
4318 as_bad (_("bad register range"));
4319 goto error;
4320 }
4321 for (; reg < hi_reg; reg++)
4322 mask |= 1 << reg;
4323 }
4324 }
4325 while (skip_past_comma (&input_line_pointer) != FAIL);
4326
4327 skip_past_char (&input_line_pointer, '}');
4328
4329 demand_empty_rest_of_line ();
4330
4331 /* Generate any deferred opcodes because we're going to be looking at
4332 the list. */
4333 flush_pending_unwind ();
4334
4335 for (reg = 0; reg < 16; reg++)
4336 {
4337 if (mask & (1 << reg))
4338 unwind.frame_size += 4;
4339 }
4340 op = 0xc700 | mask;
4341 add_unwind_opcode (op, 2);
4342 return;
4343 error:
4344 ignore_rest_of_line ();
4345 }
4346
4347
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  Peeks at
   the first register to decide which register class is being saved,
   then dispatches to the class-specific handler.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Peek only — input_line_pointer is left alone so the handler can
     re-parse the full operand.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4416
4417
/* Parse an unwind_movsp directive.  Records that sp has been restored
   from REG (optionally adjusted by a constant) and emits the matching
   0x9X unwind opcode.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .unwind_movsp after .unwind_setfp (or a second movsp) is invalid.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4467
4468 /* Parse an unwind_pad directive. */
4469
4470 static void
4471 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4472 {
4473 int offset;
4474
4475 if (!unwind.proc_start)
4476 as_bad (MISSING_FNSTART);
4477
4478 if (immediate_for_directive (&offset) == FAIL)
4479 return;
4480
4481 if (offset & 3)
4482 {
4483 as_bad (_("stack increment must be multiple of 4"));
4484 ignore_rest_of_line ();
4485 return;
4486 }
4487
4488 /* Don't generate any opcodes, just record the details for later. */
4489 unwind.frame_size += offset;
4490 unwind.pending_offset += offset;
4491
4492 demand_empty_rest_of_line ();
4493 }
4494
4495 /* Parse an unwind_setfp directive. */
4496
4497 static void
4498 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4499 {
4500 int sp_reg;
4501 int fp_reg;
4502 int offset;
4503
4504 if (!unwind.proc_start)
4505 as_bad (MISSING_FNSTART);
4506
4507 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4508 if (skip_past_comma (&input_line_pointer) == FAIL)
4509 sp_reg = FAIL;
4510 else
4511 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4512
4513 if (fp_reg == FAIL || sp_reg == FAIL)
4514 {
4515 as_bad (_("expected <reg>, <reg>"));
4516 ignore_rest_of_line ();
4517 return;
4518 }
4519
4520 /* Optional constant. */
4521 if (skip_past_comma (&input_line_pointer) != FAIL)
4522 {
4523 if (immediate_for_directive (&offset) == FAIL)
4524 return;
4525 }
4526 else
4527 offset = 0;
4528
4529 demand_empty_rest_of_line ();
4530
4531 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4532 {
4533 as_bad (_("register must be either sp or set by a previous"
4534 "unwind_movsp directive"));
4535 return;
4536 }
4537
4538 /* Don't generate any opcodes, just record the information for later. */
4539 unwind.fp_reg = fp_reg;
4540 unwind.fp_used = 1;
4541 if (sp_reg == REG_SP)
4542 unwind.fp_offset = unwind.frame_size - offset;
4543 else
4544 unwind.fp_offset -= offset;
4545 }
4546
4547 /* Parse an unwind_raw directive. */
4548
4549 static void
4550 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4551 {
4552 expressionS exp;
4553 /* This is an arbitrary limit. */
4554 unsigned char op[16];
4555 int count;
4556
4557 if (!unwind.proc_start)
4558 as_bad (MISSING_FNSTART);
4559
4560 expression (&exp);
4561 if (exp.X_op == O_constant
4562 && skip_past_comma (&input_line_pointer) != FAIL)
4563 {
4564 unwind.frame_size += exp.X_add_number;
4565 expression (&exp);
4566 }
4567 else
4568 exp.X_op = O_illegal;
4569
4570 if (exp.X_op != O_constant)
4571 {
4572 as_bad (_("expected <offset>, <opcode>"));
4573 ignore_rest_of_line ();
4574 return;
4575 }
4576
4577 count = 0;
4578
4579 /* Parse the opcode. */
4580 for (;;)
4581 {
4582 if (count >= 16)
4583 {
4584 as_bad (_("unwind opcode too long"));
4585 ignore_rest_of_line ();
4586 }
4587 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4588 {
4589 as_bad (_("invalid unwind opcode"));
4590 ignore_rest_of_line ();
4591 return;
4592 }
4593 op[count++] = exp.X_add_number;
4594
4595 /* Parse the next byte. */
4596 if (skip_past_comma (&input_line_pointer) == FAIL)
4597 break;
4598
4599 expression (&exp);
4600 }
4601
4602 /* Add the opcode bytes in reverse order. */
4603 while (count--)
4604 add_unwind_opcode (op[count], 1);
4605
4606 demand_empty_rest_of_line ();
4607 }
4608
4609
4610 /* Parse a .eabi_attribute directive. */
4611
4612 static void
4613 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4614 {
4615 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4616
4617 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4618 attributes_set_explicitly[tag] = 1;
4619 }
4620
/* Emit a tls fix for the symbol.  Implements .tlsdescseq: attaches a
   TLS descriptor-sequence relocation to the current code location
   without emitting any bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fixup covers the next 4 bytes of the frag, but no data is
     written here — the relocation only marks the sequence.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4644 #endif /* OBJ_ELF */
4645
4646 static void s_arm_arch (int);
4647 static void s_arm_object_arch (int);
4648 static void s_arm_cpu (int);
4649 static void s_arm_fpu (int);
4650 static void s_arm_arch_extension (int);
4651
4652 #ifdef TE_PE
4653
4654 static void
4655 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4656 {
4657 expressionS exp;
4658
4659 do
4660 {
4661 expression (&exp);
4662 if (exp.X_op == O_symbol)
4663 exp.X_op = O_secrel;
4664
4665 emit_expr (&exp, 4);
4666 }
4667 while (*input_line_pointer++ == ',');
4668
4669 input_line_pointer--;
4670 demand_empty_rest_of_line ();
4671 }
4672 #endif /* TE_PE */
4673
4674 /* This table describes all the machine specific pseudo-ops the assembler
4675 has to support. The fields are:
4676 pseudo-op name without dot
4677 function to call to execute this pseudo-op
4678 Integer arg to pass to the function. */
4679
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	0 },
  { "qn",	   s_qn,	0 },
  { "unreq",	   s_unreq,	0 },
  { "bss",	   s_bss,	0 },
  { "align",	   s_align_ptwo,  2 },
  /* Instruction set / state selection.  */
  { "arm",	   s_arm,	0 },
  { "thumb",	   s_thumb,	0 },
  { "code",	   s_code,	0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	0 },
  { "even",	   s_even,	0 },
  /* Literal pool management.  */
  { "ltorg",	   s_ltorg,	0 },
  { "pool",	   s_ltorg,	0 },
  { "syntax",	   s_syntax,	0 },
  /* Target selection.  */
  { "cpu",	   s_arm_cpu,	0 },
  { "arch",	   s_arm_arch,	0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",	   s_arm_elf_inst, 2 },
  { "inst.w",	   s_arm_elf_inst, 4 },
  { "inst",	   s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4753 \f
4754 /* Parser functions used exclusively in instruction operands. */
4755
4756 /* Generic immediate-value read function for use in insn parsing.
4757 STR points to the beginning of the immediate (the leading #);
4758 VAL receives the value; if the value is outside [MIN, MAX]
4759 issue an error. PREFIX_OPT is true if the immediate prefix is
4760 optional. */
4761
4762 static int
4763 parse_immediate (char **str, int *val, int min, int max,
4764 bfd_boolean prefix_opt)
4765 {
4766 expressionS exp;
4767 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4768 if (exp.X_op != O_constant)
4769 {
4770 inst.error = _("constant expression required");
4771 return FAIL;
4772 }
4773
4774 if (exp.X_add_number < min || exp.X_add_number > max)
4775 {
4776 inst.error = _("immediate value out of range");
4777 return FAIL;
4778 }
4779
4780 *val = exp.X_add_number;
4781 return SUCCESS;
4782 }
4783
4784 /* Less-generic immediate-value read function with the possibility of loading a
4785 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4786 instructions. Puts the result directly in inst.operands[i]. */
4787
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* If the caller did not supply an expression to fill in, use a local.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm; the high 32 bits (if any) go in .reg
	 with .regisimm set.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums used.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4856
4857 /* Returns the pseudo-register number of an FPA immediate constant,
4858 or FAIL if there isn't a valid constant here. */
4859
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediates are encoded as pseudo-registers 8..15.  */
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* NOTE: for O_big results, a negative X_add_number marks a
     floating-point (flonum) value rather than a bignum.  */
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the global input pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4949
4950 /* Returns 1 if a number has "quarter-precision" float format
4951 0baBbbbbbc defgh000 00000000 00000000. */
4952
static int
is_quarter_float (unsigned imm)
{
  unsigned expected_high;

  /* The low 19 bits (the tail of the mantissa) must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 29..25 must all equal bit 29, and bit 30 must be its
     complement; the sign bit and bits 24..19 are unconstrained.  */
  expected_high = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_high;
}
4959
4960
4961 /* Detect the presence of a floating point or integer zero constant,
4962 i.e. #0.0 or #0. */
4963
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0] means the hex constant must be exactly zero.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* Accept only +0.0: low > leader indicates the parsed significand has
     no non-zero littlenums, i.e. the value is zero (NOTE(review):
     relies on the gas FLONUM convention that LEADER points at the most
     significant non-zero littlenum).  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4994
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5000
5001 static unsigned
5002 parse_qfloat_immediate (char **ccp, int *immed)
5003 {
5004 char *str = *ccp;
5005 char *fpnum;
5006 LITTLENUM_TYPE words[MAX_LITTLENUMS];
5007 int found_fpchar = 0;
5008
5009 skip_past_char (&str, '#');
5010
5011 /* We must not accidentally parse an integer as a floating-point number. Make
5012 sure that the value we parse is not an integer by checking for special
5013 characters '.' or 'e'.
5014 FIXME: This is a horrible hack, but doing better is tricky because type
5015 information isn't in a very usable state at parse time. */
5016 fpnum = str;
5017 skip_whitespace (fpnum);
5018
5019 if (strncmp (fpnum, "0x", 2) == 0)
5020 return FAIL;
5021 else
5022 {
5023 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5024 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5025 {
5026 found_fpchar = 1;
5027 break;
5028 }
5029
5030 if (!found_fpchar)
5031 return FAIL;
5032 }
5033
5034 if ((str = atof_ieee (str, 's', words)) != NULL)
5035 {
5036 unsigned fpword = 0;
5037 int i;
5038
5039 /* Our FP word must be 32 bits (single-precision FP). */
5040 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5041 {
5042 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5043 fpword |= words[i];
5044 }
5045
5046 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5047 *immed = fpword;
5048 else
5049 return FAIL;
5050
5051 *ccp = str;
5052
5053 return SUCCESS;
5054 }
5055
5056 return FAIL;
5057 }
5058
5059 /* Shift operands. */
enum shift_kind
{
  /* RRX is encoded as ROR #0 and takes no shift amount.  */
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic to its kind; entries are looked up via
   hash_find_n in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5080
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5083
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5086 RRX
5087
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5090
5091 static int
5092 parse_shift (char **str, int i, enum parse_shift_mode mode)
5093 {
5094 const struct asm_shift_name *shift_name;
5095 enum shift_kind shift;
5096 char *s = *str;
5097 char *p = s;
5098 int reg;
5099
5100 for (p = *str; ISALPHA (*p); p++)
5101 ;
5102
5103 if (p == *str)
5104 {
5105 inst.error = _("shift expression expected");
5106 return FAIL;
5107 }
5108
5109 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5110 p - *str);
5111
5112 if (shift_name == NULL)
5113 {
5114 inst.error = _("shift expression expected");
5115 return FAIL;
5116 }
5117
5118 shift = shift_name->kind;
5119
5120 switch (mode)
5121 {
5122 case NO_SHIFT_RESTRICT:
5123 case SHIFT_IMMEDIATE: break;
5124
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5126 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5127 {
5128 inst.error = _("'LSL' or 'ASR' required");
5129 return FAIL;
5130 }
5131 break;
5132
5133 case SHIFT_LSL_IMMEDIATE:
5134 if (shift != SHIFT_LSL)
5135 {
5136 inst.error = _("'LSL' required");
5137 return FAIL;
5138 }
5139 break;
5140
5141 case SHIFT_ASR_IMMEDIATE:
5142 if (shift != SHIFT_ASR)
5143 {
5144 inst.error = _("'ASR' required");
5145 return FAIL;
5146 }
5147 break;
5148
5149 default: abort ();
5150 }
5151
5152 if (shift != SHIFT_RRX)
5153 {
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p);
5156
5157 if (mode == NO_SHIFT_RESTRICT
5158 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5159 {
5160 inst.operands[i].imm = reg;
5161 inst.operands[i].immisreg = 1;
5162 }
5163 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5164 return FAIL;
5165 }
5166 inst.operands[i].shift_kind = shift;
5167 inst.operands[i].shifted = 1;
5168 *str = p;
5169 return SUCCESS;
5170 }
5171
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5173
5174 #<immediate>
5175 #<immediate>, <rotate>
5176 <Rm>
5177 <Rm>, <shift>
5178
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5182
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30] and the base
	 constant an 8-bit value, per the A32 modified-immediate form.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  VALUE is even, so VALUE << 7 equals
	 (VALUE / 2) << 8, placing the rotation in bits 11:8.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #immediate: leave the value in inst.reloc for md_apply_fix
     to validate and encode.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5241
5242 /* Group relocation information. Each entry in the table contains the
5243 textual name of the relocation as may appear in assembler source
5244 and must end with a colon.
5245 Along with this textual name are the relocation codes to be used if
5246 the corresponding instruction is an ALU instruction (ADD or SUB only),
5247 an LDR, an LDRS, or an LDC. */
5248
struct group_reloc_table_entry
{
  const char *name;	/* Name as written in source, without the colon.  */
  int alu_code;		/* BFD reloc for ADD/SUB, or 0 if not permitted.  */
  int ldr_code;		/* BFD reloc for LDR, or 0 if not permitted.  */
  int ldrs_code;	/* BFD reloc for LDRS, or 0 if not permitted.  */
  int ldc_code;		/* BFD reloc for LDC, or 0 if not permitted.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5266
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    /* A zero code below means the relocation is not available for that
       instruction class; parse_address_main reports an error for it.  */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5341
5342 /* Given the address of a pointer pointing to the textual name of a group
5343 relocation as may appear in assembler source, attempt to find its details
5344 in group_reloc_table. The pointer will be updated to the character after
5345 the trailing colon. On failure, FAIL will be returned; SUCCESS
5346 otherwise. On success, *entry will be updated to point at the relevant
5347 group_reloc_table entry. */
5348
5349 static int
5350 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5351 {
5352 unsigned int i;
5353 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5354 {
5355 int length = strlen (group_reloc_table[i].name);
5356
5357 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5358 && (*str)[length] == ':')
5359 {
5360 *out = &group_reloc_table[i];
5361 *str += (length + 1);
5362 return SUCCESS;
5363 }
5364 }
5365
5366 return FAIL;
5367 }
5368
5369 /* Parse a <shifter_operand> for an ARM data processing instruction
5370 (as for parse_shifter_operand) where group relocations are allowed:
5371
5372 #<immediate>
5373 #<immediate>, <rotate>
5374 #:<group_reloc>:<expression>
5375 <Rm>
5376 <Rm>, <shift>
5377
5378 where <group_reloc> is one of the strings defined in group_reloc_table.
5379 The hashes are optional.
5380
5381 Everything else is as for parse_shifter_operand. */
5382
5383 static parse_operand_result
5384 parse_shifter_operand_group_reloc (char **str, int i)
5385 {
5386 /* Determine if we have the sequence of characters #: or just :
5387 coming next. If we do, then we check for a group relocation.
5388 If we don't, punt the whole lot to parse_shifter_operand. */
5389
5390 if (((*str)[0] == '#' && (*str)[1] == ':')
5391 || (*str)[0] == ':')
5392 {
5393 struct group_reloc_table_entry *entry;
5394
5395 if ((*str)[0] == '#')
5396 (*str) += 2;
5397 else
5398 (*str)++;
5399
5400 /* Try to parse a group relocation. Anything else is an error. */
5401 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5402 {
5403 inst.error = _("unknown group relocation");
5404 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5405 }
5406
5407 /* We now have the group relocation table entry corresponding to
5408 the name in the assembler source. Next, we parse the expression. */
5409 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5410 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5411
5412 /* Record the relocation type (always the ALU variant here). */
5413 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5414 gas_assert (inst.reloc.type != 0);
5415
5416 return PARSE_OPERAND_SUCCESS;
5417 }
5418 else
5419 return parse_shifter_operand (str, i) == SUCCESS
5420 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5421
5422 /* Never reached. */
5423 }
5424
5425 /* Parse a Neon alignment expression. Information is written to
5426 inst.operands[i]. We assume the initial ':' has been skipped.
5427
5428 align .imm = align << 8, .immisalign=1, .preind=0 */
5429 static parse_operand_result
5430 parse_neon_alignment (char **str, int i)
5431 {
5432 char *p = *str;
5433 expressionS exp;
5434
5435 my_get_expression (&exp, &p, GE_NO_PREFIX);
5436
5437 if (exp.X_op != O_constant)
5438 {
5439 inst.error = _("alignment must be constant");
5440 return PARSE_OPERAND_FAIL;
5441 }
5442
5443 inst.operands[i].imm = exp.X_add_number << 8;
5444 inst.operands[i].immisalign = 1;
5445 /* Alignments are not pre-indexes. */
5446 inst.operands[i].preind = 0;
5447
5448 *str = p;
5449 return PARSE_OPERAND_SUCCESS;
5450 }
5451
5452 /* Parse all forms of an ARM address expression. Information is written
5453 to inst.operands[i] and/or inst.reloc.
5454
5455 Preindexed addressing (.preind=1):
5456
5457 [Rn, #offset] .reg=Rn .reloc.exp=offset
5458 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5459 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5460 .shift_kind=shift .reloc.exp=shift_imm
5461
5462 These three may have a trailing ! which causes .writeback to be set also.
5463
5464 Postindexed addressing (.postind=1, .writeback=1):
5465
5466 [Rn], #offset .reg=Rn .reloc.exp=offset
5467 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5468 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5469 .shift_kind=shift .reloc.exp=shift_imm
5470
5471 Unindexed addressing (.preind=0, .postind=0):
5472
5473 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5474
5475 Other:
5476
5477 [Rn]{!} shorthand for [Rn,#0]{!}
5478 =immediate .isreg=0 .reloc.exp=immediate
5479 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5480
5481 It is the caller's responsibility to check for addressing modes not
5482 supported by the instruction, and to set inst.reloc.type. */
5483
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No opening '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed forms: [Rn, ...].  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Undo the sign skip: the '-' belongs to the immediate
	     expression, so back up and let the expression parser see it.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table marks the relocation as not
		 available for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* Post-indexed and unindexed forms: [Rn], ...  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* As above: the '-' belongs to the immediate expression.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5735
5736 static int
5737 parse_address (char **str, int i)
5738 {
5739 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5740 ? SUCCESS : FAIL;
5741 }
5742
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  /* As parse_address, but group relocations of the given TYPE are
     accepted in the address expression.  */
  return parse_address_main (str, i, 1, type);
}
5748
5749 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* Recognize the optional :lower16:/:upper16: relocation prefixes.
     NOTE(review): assumes inst.reloc.type is BFD_RELOC_UNUSED on entry,
     so a pre-set type reads as "prefix seen" below — confirm callers.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both prefixes are 9 characters long.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a prefix the operand must be a plain 16-bit constant; with
     one, range checking is deferred to the relocation machinery.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5788
5789 /* Miscellaneous. */
5790
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write), FALSE
   for the source of an MRS (a read).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: match the whole register name against the v7m PSR
	 table.  First scan the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *apsr/*psr family, stop the name just after the 'r' so
	 that any APSR-style bitfield suffix is left for check_suffix.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name ("SPSR"/"CPSR"/"APSR").  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each
	     letter may appear only once: a repeated letter sets bit 0x20
	     in nzcvq_bits (or 0x2 in g_bit), which the validity check
	     below rejects.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All of n, z, c, v and q together select the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the suffix up in the PSR-field table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5987
5988 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5989 value suitable for splatting into the AIF field of the instruction. */
5990
5991 static int
5992 parse_cps_flags (char **str)
5993 {
5994 int val = 0;
5995 int saw_a_flag = 0;
5996 char *s = *str;
5997
5998 for (;;)
5999 switch (*s++)
6000 {
6001 case '\0': case ',':
6002 goto done;
6003
6004 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6005 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6006 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6007
6008 default:
6009 inst.error = _("unrecognized CPS flag");
6010 return FAIL;
6011 }
6012
6013 done:
6014 if (saw_a_flag == 0)
6015 {
6016 inst.error = _("missing CPS flags");
6017 return FAIL;
6018 }
6019
6020 *str = s - 1;
6021 return val;
6022 }
6023
6024 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6025 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6026
6027 static int
6028 parse_endian_specifier (char **str)
6029 {
6030 int little_endian;
6031 char *s = *str;
6032
6033 if (strncasecmp (s, "BE", 2))
6034 little_endian = 0;
6035 else if (strncasecmp (s, "LE", 2))
6036 little_endian = 1;
6037 else
6038 {
6039 inst.error = _("valid endian specifiers are be or le");
6040 return FAIL;
6041 }
6042
6043 if (ISALNUM (s[2]) || s[2] == '_')
6044 {
6045 inst.error = _("valid endian specifiers are be or le");
6046 return FAIL;
6047 }
6048
6049 *str = s + 2;
6050 return little_endian;
6051 }
6052
6053 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6054 value suitable for poking into the rotate field of an sxt or sxta
6055 instruction, or FAIL on error. */
6056
6057 static int
6058 parse_ror (char **str)
6059 {
6060 int rot;
6061 char *s = *str;
6062
6063 if (strncasecmp (s, "ROR", 3) == 0)
6064 s += 3;
6065 else
6066 {
6067 inst.error = _("missing rotation field after comma");
6068 return FAIL;
6069 }
6070
6071 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6072 return FAIL;
6073
6074 switch (rot)
6075 {
6076 case 0: *str = s; return 0x0;
6077 case 8: *str = s; return 0x1;
6078 case 16: *str = s; return 0x2;
6079 case 24: *str = s; return 0x3;
6080
6081 default:
6082 inst.error = _("rotation can only be 0, 8, 16, or 24");
6083 return FAIL;
6084 }
6085 }
6086
6087 /* Parse a conditional code (from conds[] below). The value returned is in the
6088 range 0 .. 14, or FAIL. */
6089 static int
6090 parse_cond (char **str)
6091 {
6092 char *q;
6093 const struct asm_cond *c;
6094 int n;
6095 /* Condition codes are always 2 characters, so matching up to
6096 3 characters is sufficient. */
6097 char cond[3];
6098
6099 q = *str;
6100 n = 0;
6101 while (ISALPHA (*q) && n < 3)
6102 {
6103 cond[n] = TOLOWER (*q);
6104 q++;
6105 n++;
6106 }
6107
6108 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6109 if (!c)
6110 {
6111 inst.error = _("condition required");
6112 return FAIL;
6113 }
6114
6115 *str = q;
6116 return c->value;
6117 }
6118
6119 /* Record a use of the given feature. */
6120 static void
6121 record_feature_use (const arm_feature_set *feature)
6122 {
6123 if (thumb_mode)
6124 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6125 else
6126 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6127 }
6128
6129 /* If the given feature available in the selected CPU, mark it as used.
6130 Returns TRUE iff feature is available. */
6131 static bfd_boolean
6132 mark_feature_used (const arm_feature_set *feature)
6133 {
6134 /* Ensure the option is valid on the current architecture. */
6135 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6136 return FALSE;
6137
6138 /* Add the appropriate architecture feature for the barrier option used.
6139 */
6140 record_feature_use (feature);
6141
6142 return TRUE;
6143 }
6144
6145 /* Parse an option for a barrier instruction. Returns the encoding for the
6146 option, or FAIL. */
6147 static int
6148 parse_barrier (char **str)
6149 {
6150 char *p, *q;
6151 const struct asm_barrier_opt *o;
6152
6153 p = q = *str;
6154 while (ISALPHA (*q))
6155 q++;
6156
6157 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6158 q - p);
6159 if (!o)
6160 return FAIL;
6161
6162 if (!mark_feature_used (&o->arch))
6163 return FAIL;
6164
6165 *str = q;
6166 return o->value;
6167 }
6168
6169 /* Parse the operands of a table branch instruction. Similar to a memory
6170 operand. */
6171 static int
6172 parse_tb (char **str)
6173 {
6174 char * p = *str;
6175 int reg;
6176
6177 if (skip_past_char (&p, '[') == FAIL)
6178 {
6179 inst.error = _("'[' expected");
6180 return FAIL;
6181 }
6182
6183 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6184 {
6185 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6186 return FAIL;
6187 }
6188 inst.operands[0].reg = reg;
6189
6190 if (skip_past_comma (&p) == FAIL)
6191 {
6192 inst.error = _("',' expected");
6193 return FAIL;
6194 }
6195
6196 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6197 {
6198 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6199 return FAIL;
6200 }
6201 inst.operands[0].imm = reg;
6202
6203 if (skip_past_comma (&p) == SUCCESS)
6204 {
6205 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6206 return FAIL;
6207 if (inst.reloc.exp.X_add_number != 1)
6208 {
6209 inst.error = _("invalid shift");
6210 return FAIL;
6211 }
6212 inst.operands[0].shifted = 1;
6213 }
6214
6215 if (skip_past_char (&p, ']') == FAIL)
6216 {
6217 inst.error = _("']' expected");
6218 return FAIL;
6219 }
6220 *str = p;
6221 return SUCCESS;
6222 }
6223
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   NOTE: this advances the operand counter I itself (via *which_operand);
   the caller's loop index moves accordingly.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is a vector
	 register (S, D or Q); what follows disambiguates the cases.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A quad register cannot be moved to/from core registers.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6446
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code goes in the low
   16 bits and the Thumb one in the high 16 bits; parse_operands selects
   the appropriate half according to its `thumb' argument.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6451
/* Matcher codes for parse_operands.
   NOTE: the declaration order is significant in two ways: the optional
   (OP_o*) codes must all come after OP_FIRST_OPTIONAL's referent, since
   parse_operands compares codes against OP_FIRST_OPTIONAL to decide
   whether backtracking is permitted; and the MIX_ARM_THUMB_OPERANDS
   combinations pack two plain codes into one value.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*				1 .. 32 */
  OP_oI32z,      /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6582
6583 /* Generic instruction operand parser. This does no encoding and no
6584 semantic validation; it merely squirrels values away in the inst
6585 structure. Returns SUCCESS or FAIL depending on whether the
6586 specified grammar matched. */
6587 static int
6588 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6589 {
6590 unsigned const int *upat = pattern;
6591 char *backtrack_pos = 0;
6592 const char *backtrack_error = 0;
6593 int i, val = 0, backtrack_index = 0;
6594 enum arm_reg_type rtype;
6595 parse_operand_result result;
6596 unsigned int op_parse_code;
6597
6598 #define po_char_or_fail(chr) \
6599 do \
6600 { \
6601 if (skip_past_char (&str, chr) == FAIL) \
6602 goto bad_args; \
6603 } \
6604 while (0)
6605
6606 #define po_reg_or_fail(regtype) \
6607 do \
6608 { \
6609 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6610 & inst.operands[i].vectype); \
6611 if (val == FAIL) \
6612 { \
6613 first_error (_(reg_expected_msgs[regtype])); \
6614 goto failure; \
6615 } \
6616 inst.operands[i].reg = val; \
6617 inst.operands[i].isreg = 1; \
6618 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6619 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6620 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6621 || rtype == REG_TYPE_VFD \
6622 || rtype == REG_TYPE_NQ); \
6623 } \
6624 while (0)
6625
6626 #define po_reg_or_goto(regtype, label) \
6627 do \
6628 { \
6629 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6630 & inst.operands[i].vectype); \
6631 if (val == FAIL) \
6632 goto label; \
6633 \
6634 inst.operands[i].reg = val; \
6635 inst.operands[i].isreg = 1; \
6636 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6637 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6638 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6639 || rtype == REG_TYPE_VFD \
6640 || rtype == REG_TYPE_NQ); \
6641 } \
6642 while (0)
6643
6644 #define po_imm_or_fail(min, max, popt) \
6645 do \
6646 { \
6647 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6648 goto failure; \
6649 inst.operands[i].imm = val; \
6650 } \
6651 while (0)
6652
6653 #define po_scalar_or_goto(elsz, label) \
6654 do \
6655 { \
6656 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6657 if (val == FAIL) \
6658 goto label; \
6659 inst.operands[i].reg = val; \
6660 inst.operands[i].isscalar = 1; \
6661 } \
6662 while (0)
6663
6664 #define po_misc_or_fail(expr) \
6665 do \
6666 { \
6667 if (expr) \
6668 goto failure; \
6669 } \
6670 while (0)
6671
6672 #define po_misc_or_fail_no_backtrack(expr) \
6673 do \
6674 { \
6675 result = expr; \
6676 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6677 backtrack_pos = 0; \
6678 if (result != PARSE_OPERAND_SUCCESS) \
6679 goto failure; \
6680 } \
6681 while (0)
6682
6683 #define po_barrier_or_imm(str) \
6684 do \
6685 { \
6686 val = parse_barrier (&str); \
6687 if (val == FAIL && ! ISALPHA (*str)) \
6688 goto immediate; \
6689 if (val == FAIL \
6690 /* ISB can only take SY as an option. */ \
6691 || ((inst.instruction & 0xf0) == 0x60 \
6692 && val != 0xf)) \
6693 { \
6694 inst.error = _("invalid barrier type"); \
6695 backtrack_pos = 0; \
6696 goto failure; \
6697 } \
6698 } \
6699 while (0)
6700
6701 skip_whitespace (str);
6702
6703 for (i = 0; upat[i] != OP_stop; i++)
6704 {
6705 op_parse_code = upat[i];
6706 if (op_parse_code >= 1<<16)
6707 op_parse_code = thumb ? (op_parse_code >> 16)
6708 : (op_parse_code & ((1<<16)-1));
6709
6710 if (op_parse_code >= OP_FIRST_OPTIONAL)
6711 {
6712 /* Remember where we are in case we need to backtrack. */
6713 gas_assert (!backtrack_pos);
6714 backtrack_pos = str;
6715 backtrack_error = inst.error;
6716 backtrack_index = i;
6717 }
6718
6719 if (i > 0 && (i > 1 || inst.operands[0].present))
6720 po_char_or_fail (',');
6721
6722 switch (op_parse_code)
6723 {
6724 /* Registers */
6725 case OP_oRRnpc:
6726 case OP_oRRnpcsp:
6727 case OP_RRnpc:
6728 case OP_RRnpcsp:
6729 case OP_oRR:
6730 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6731 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6732 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6733 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6734 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6735 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6736 case OP_oRND:
6737 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6738 case OP_RVC:
6739 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6740 break;
6741 /* Also accept generic coprocessor regs for unknown registers. */
6742 coproc_reg:
6743 po_reg_or_fail (REG_TYPE_CN);
6744 break;
6745 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6746 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6747 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6748 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6749 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6750 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6751 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6752 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6753 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6754 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6755 case OP_oRNQ:
6756 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6757 case OP_oRNDQ:
6758 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6759 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6760 case OP_oRNSDQ:
6761 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6762
6763 /* Neon scalar. Using an element size of 8 means that some invalid
6764 scalars are accepted here, so deal with those in later code. */
6765 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6766
6767 case OP_RNDQ_I0:
6768 {
6769 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6770 break;
6771 try_imm0:
6772 po_imm_or_fail (0, 0, TRUE);
6773 }
6774 break;
6775
6776 case OP_RVSD_I0:
6777 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6778 break;
6779
6780 case OP_RSVD_FI0:
6781 {
6782 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6783 break;
6784 try_ifimm0:
6785 if (parse_ifimm_zero (&str))
6786 inst.operands[i].imm = 0;
6787 else
6788 {
6789 inst.error
6790 = _("only floating point zero is allowed as immediate value");
6791 goto failure;
6792 }
6793 }
6794 break;
6795
6796 case OP_RR_RNSC:
6797 {
6798 po_scalar_or_goto (8, try_rr);
6799 break;
6800 try_rr:
6801 po_reg_or_fail (REG_TYPE_RN);
6802 }
6803 break;
6804
6805 case OP_RNSDQ_RNSC:
6806 {
6807 po_scalar_or_goto (8, try_nsdq);
6808 break;
6809 try_nsdq:
6810 po_reg_or_fail (REG_TYPE_NSDQ);
6811 }
6812 break;
6813
6814 case OP_RNDQ_RNSC:
6815 {
6816 po_scalar_or_goto (8, try_ndq);
6817 break;
6818 try_ndq:
6819 po_reg_or_fail (REG_TYPE_NDQ);
6820 }
6821 break;
6822
6823 case OP_RND_RNSC:
6824 {
6825 po_scalar_or_goto (8, try_vfd);
6826 break;
6827 try_vfd:
6828 po_reg_or_fail (REG_TYPE_VFD);
6829 }
6830 break;
6831
6832 case OP_VMOV:
6833 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6834 not careful then bad things might happen. */
6835 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6836 break;
6837
6838 case OP_RNDQ_Ibig:
6839 {
6840 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6841 break;
6842 try_immbig:
6843 /* There's a possibility of getting a 64-bit immediate here, so
6844 we need special handling. */
6845 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6846 == FAIL)
6847 {
6848 inst.error = _("immediate value is out of range");
6849 goto failure;
6850 }
6851 }
6852 break;
6853
6854 case OP_RNDQ_I63b:
6855 {
6856 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6857 break;
6858 try_shimm:
6859 po_imm_or_fail (0, 63, TRUE);
6860 }
6861 break;
6862
6863 case OP_RRnpcb:
6864 po_char_or_fail ('[');
6865 po_reg_or_fail (REG_TYPE_RN);
6866 po_char_or_fail (']');
6867 break;
6868
6869 case OP_RRnpctw:
6870 case OP_RRw:
6871 case OP_oRRw:
6872 po_reg_or_fail (REG_TYPE_RN);
6873 if (skip_past_char (&str, '!') == SUCCESS)
6874 inst.operands[i].writeback = 1;
6875 break;
6876
6877 /* Immediates */
6878 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6879 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6880 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6881 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6882 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6883 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6884 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6885 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6886 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6887 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6888 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6889 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6890
6891 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6892 case OP_oI7b:
6893 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6894 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6895 case OP_oI31b:
6896 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6897 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6898 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6899 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6900
6901 /* Immediate variants */
6902 case OP_oI255c:
6903 po_char_or_fail ('{');
6904 po_imm_or_fail (0, 255, TRUE);
6905 po_char_or_fail ('}');
6906 break;
6907
6908 case OP_I31w:
6909 /* The expression parser chokes on a trailing !, so we have
6910 to find it first and zap it. */
6911 {
6912 char *s = str;
6913 while (*s && *s != ',')
6914 s++;
6915 if (s[-1] == '!')
6916 {
6917 s[-1] = '\0';
6918 inst.operands[i].writeback = 1;
6919 }
6920 po_imm_or_fail (0, 31, TRUE);
6921 if (str == s - 1)
6922 str = s;
6923 }
6924 break;
6925
6926 /* Expressions */
6927 case OP_EXPi: EXPi:
6928 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6929 GE_OPT_PREFIX));
6930 break;
6931
6932 case OP_EXP:
6933 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6934 GE_NO_PREFIX));
6935 break;
6936
6937 case OP_EXPr: EXPr:
6938 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6939 GE_NO_PREFIX));
6940 if (inst.reloc.exp.X_op == O_symbol)
6941 {
6942 val = parse_reloc (&str);
6943 if (val == -1)
6944 {
6945 inst.error = _("unrecognized relocation suffix");
6946 goto failure;
6947 }
6948 else if (val != BFD_RELOC_UNUSED)
6949 {
6950 inst.operands[i].imm = val;
6951 inst.operands[i].hasreloc = 1;
6952 }
6953 }
6954 break;
6955
6956 /* Operand for MOVW or MOVT. */
6957 case OP_HALF:
6958 po_misc_or_fail (parse_half (&str));
6959 break;
6960
6961 /* Register or expression. */
6962 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6963 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6964
6965 /* Register or immediate. */
6966 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6967 I0: po_imm_or_fail (0, 0, FALSE); break;
6968
6969 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6970 IF:
6971 if (!is_immediate_prefix (*str))
6972 goto bad_args;
6973 str++;
6974 val = parse_fpa_immediate (&str);
6975 if (val == FAIL)
6976 goto failure;
6977 /* FPA immediates are encoded as registers 8-15.
6978 parse_fpa_immediate has already applied the offset. */
6979 inst.operands[i].reg = val;
6980 inst.operands[i].isreg = 1;
6981 break;
6982
6983 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6984 I32z: po_imm_or_fail (0, 32, FALSE); break;
6985
6986 /* Two kinds of register. */
6987 case OP_RIWR_RIWC:
6988 {
6989 struct reg_entry *rege = arm_reg_parse_multi (&str);
6990 if (!rege
6991 || (rege->type != REG_TYPE_MMXWR
6992 && rege->type != REG_TYPE_MMXWC
6993 && rege->type != REG_TYPE_MMXWCG))
6994 {
6995 inst.error = _("iWMMXt data or control register expected");
6996 goto failure;
6997 }
6998 inst.operands[i].reg = rege->number;
6999 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7000 }
7001 break;
7002
7003 case OP_RIWC_RIWG:
7004 {
7005 struct reg_entry *rege = arm_reg_parse_multi (&str);
7006 if (!rege
7007 || (rege->type != REG_TYPE_MMXWC
7008 && rege->type != REG_TYPE_MMXWCG))
7009 {
7010 inst.error = _("iWMMXt control register expected");
7011 goto failure;
7012 }
7013 inst.operands[i].reg = rege->number;
7014 inst.operands[i].isreg = 1;
7015 }
7016 break;
7017
7018 /* Misc */
7019 case OP_CPSF: val = parse_cps_flags (&str); break;
7020 case OP_ENDI: val = parse_endian_specifier (&str); break;
7021 case OP_oROR: val = parse_ror (&str); break;
7022 case OP_COND: val = parse_cond (&str); break;
7023 case OP_oBARRIER_I15:
7024 po_barrier_or_imm (str); break;
7025 immediate:
7026 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7027 goto failure;
7028 break;
7029
7030 case OP_wPSR:
7031 case OP_rPSR:
7032 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7033 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7034 {
7035 inst.error = _("Banked registers are not available with this "
7036 "architecture.");
7037 goto failure;
7038 }
7039 break;
7040 try_psr:
7041 val = parse_psr (&str, op_parse_code == OP_wPSR);
7042 break;
7043
7044 case OP_APSR_RR:
7045 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7046 break;
7047 try_apsr:
7048 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7049 instruction). */
7050 if (strncasecmp (str, "APSR_", 5) == 0)
7051 {
7052 unsigned found = 0;
7053 str += 5;
7054 while (found < 15)
7055 switch (*str++)
7056 {
7057 case 'c': found = (found & 1) ? 16 : found | 1; break;
7058 case 'n': found = (found & 2) ? 16 : found | 2; break;
7059 case 'z': found = (found & 4) ? 16 : found | 4; break;
7060 case 'v': found = (found & 8) ? 16 : found | 8; break;
7061 default: found = 16;
7062 }
7063 if (found != 15)
7064 goto failure;
7065 inst.operands[i].isvec = 1;
7066 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7067 inst.operands[i].reg = REG_PC;
7068 }
7069 else
7070 goto failure;
7071 break;
7072
7073 case OP_TB:
7074 po_misc_or_fail (parse_tb (&str));
7075 break;
7076
7077 /* Register lists. */
7078 case OP_REGLST:
7079 val = parse_reg_list (&str);
7080 if (*str == '^')
7081 {
7082 inst.operands[i].writeback = 1;
7083 str++;
7084 }
7085 break;
7086
7087 case OP_VRSLST:
7088 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7089 break;
7090
7091 case OP_VRDLST:
7092 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7093 break;
7094
7095 case OP_VRSDLST:
7096 /* Allow Q registers too. */
7097 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7098 REGLIST_NEON_D);
7099 if (val == FAIL)
7100 {
7101 inst.error = NULL;
7102 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7103 REGLIST_VFP_S);
7104 inst.operands[i].issingle = 1;
7105 }
7106 break;
7107
7108 case OP_NRDLST:
7109 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7110 REGLIST_NEON_D);
7111 break;
7112
7113 case OP_NSTRLST:
7114 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7115 &inst.operands[i].vectype);
7116 break;
7117
7118 /* Addressing modes */
7119 case OP_ADDR:
7120 po_misc_or_fail (parse_address (&str, i));
7121 break;
7122
7123 case OP_ADDRGLDR:
7124 po_misc_or_fail_no_backtrack (
7125 parse_address_group_reloc (&str, i, GROUP_LDR));
7126 break;
7127
7128 case OP_ADDRGLDRS:
7129 po_misc_or_fail_no_backtrack (
7130 parse_address_group_reloc (&str, i, GROUP_LDRS));
7131 break;
7132
7133 case OP_ADDRGLDC:
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str, i, GROUP_LDC));
7136 break;
7137
7138 case OP_SH:
7139 po_misc_or_fail (parse_shifter_operand (&str, i));
7140 break;
7141
7142 case OP_SHG:
7143 po_misc_or_fail_no_backtrack (
7144 parse_shifter_operand_group_reloc (&str, i));
7145 break;
7146
7147 case OP_oSHll:
7148 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7149 break;
7150
7151 case OP_oSHar:
7152 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7153 break;
7154
7155 case OP_oSHllar:
7156 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7157 break;
7158
7159 default:
7160 as_fatal (_("unhandled operand code %d"), op_parse_code);
7161 }
7162
7163 /* Various value-based sanity checks and shared operations. We
7164 do not signal immediate failures for the register constraints;
7165 this allows a syntax error to take precedence. */
7166 switch (op_parse_code)
7167 {
7168 case OP_oRRnpc:
7169 case OP_RRnpc:
7170 case OP_RRnpcb:
7171 case OP_RRw:
7172 case OP_oRRw:
7173 case OP_RRnpc_I0:
7174 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7175 inst.error = BAD_PC;
7176 break;
7177
7178 case OP_oRRnpcsp:
7179 case OP_RRnpcsp:
7180 if (inst.operands[i].isreg)
7181 {
7182 if (inst.operands[i].reg == REG_PC)
7183 inst.error = BAD_PC;
7184 else if (inst.operands[i].reg == REG_SP)
7185 inst.error = BAD_SP;
7186 }
7187 break;
7188
7189 case OP_RRnpctw:
7190 if (inst.operands[i].isreg
7191 && inst.operands[i].reg == REG_PC
7192 && (inst.operands[i].writeback || thumb))
7193 inst.error = BAD_PC;
7194 break;
7195
7196 case OP_CPSF:
7197 case OP_ENDI:
7198 case OP_oROR:
7199 case OP_wPSR:
7200 case OP_rPSR:
7201 case OP_COND:
7202 case OP_oBARRIER_I15:
7203 case OP_REGLST:
7204 case OP_VRSLST:
7205 case OP_VRDLST:
7206 case OP_VRSDLST:
7207 case OP_NRDLST:
7208 case OP_NSTRLST:
7209 if (val == FAIL)
7210 goto failure;
7211 inst.operands[i].imm = val;
7212 break;
7213
7214 default:
7215 break;
7216 }
7217
7218 /* If we get here, this operand was successfully parsed. */
7219 inst.operands[i].present = 1;
7220 continue;
7221
7222 bad_args:
7223 inst.error = BAD_ARGS;
7224
7225 failure:
7226 if (!backtrack_pos)
7227 {
7228 /* The parse routine should already have set inst.error, but set a
7229 default here just in case. */
7230 if (!inst.error)
7231 inst.error = _("syntax error");
7232 return FAIL;
7233 }
7234
7235 /* Do not backtrack over a trailing optional argument that
7236 absorbed some text. We will only fail again, with the
7237 'garbage following instruction' error message, which is
7238 probably less helpful than the current one. */
7239 if (backtrack_index == i && backtrack_pos != str
7240 && upat[i+1] == OP_stop)
7241 {
7242 if (!inst.error)
7243 inst.error = _("syntax error");
7244 return FAIL;
7245 }
7246
7247 /* Try again, skipping the optional argument at backtrack_pos. */
7248 str = backtrack_pos;
7249 inst.error = backtrack_error;
7250 inst.operands[backtrack_index].present = 0;
7251 i = backtrack_index;
7252 backtrack_pos = 0;
7253 }
7254
7255 /* Check that we have parsed all the arguments. */
7256 if (*str != '\0' && !inst.error)
7257 inst.error = _("garbage following instruction");
7258
7259 return inst.error ? FAIL : SUCCESS;
7260 }
7261
7262 #undef po_char_or_fail
7263 #undef po_reg_or_fail
7264 #undef po_reg_or_goto
7265 #undef po_imm_or_fail
7266 #undef po_scalar_or_fail
7267 #undef po_barrier_or_imm
7268
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and RETURN FROM THE
   CALLING FUNCTION — only usable inside void encode/do_* functions.
   EXPR is evaluated exactly once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7280
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   NOTE: REG is evaluated more than once — pass a simple lvalue, not
   an expression with side effects.  On failure this sets inst.error
   and returns from the calling function (like `constraint').  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7292
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Emits nothing unless warn_on_deprecated is set.
   REG is evaluated twice at most; the second evaluation only occurs
   when the warning is enabled.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7300
7301 /* Functions for operand encoding. ARM, then Thumb. */
7302
7303 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7304
7305 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7306
7307 The only binary encoding difference is the Coprocessor number. Coprocessor
7308 9 is used for half-precision calculations or conversions. The format of the
7309 instruction is the same as the equivalent Coprocessor 10 instruction that
7310 exists for Single-Precision operation. */
7311
7312 static void
7313 do_scalar_fp16_v82_encode (void)
7314 {
7315 if (inst.cond != COND_ALWAYS)
7316 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7317 " the behaviour is UNPREDICTABLE"));
7318 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7319 _(BAD_FP16));
7320
7321 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7322 mark_feature_used (&arm_ext_fp16);
7323 }
7324
7325 /* If VAL can be encoded in the immediate field of an ARM instruction,
7326 return the encoded form. Otherwise, return FAIL. */
7327
7328 static unsigned int
7329 encode_arm_immediate (unsigned int val)
7330 {
7331 unsigned int a, i;
7332
7333 if (val <= 0xff)
7334 return val;
7335
7336 for (i = 2; i < 32; i += 2)
7337 if ((a = rotate_left (val, i)) <= 0xff)
7338 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7339
7340 return FAIL;
7341 }
7342
7343 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7344 return the encoded form. Otherwise, return FAIL. */
7345 static unsigned int
7346 encode_thumb32_immediate (unsigned int val)
7347 {
7348 unsigned int a, i;
7349
7350 if (val <= 0xff)
7351 return val;
7352
7353 for (i = 1; i <= 24; i++)
7354 {
7355 a = val >> i;
7356 if ((val & ~(0xff << i)) == 0)
7357 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7358 }
7359
7360 a = val & 0xff;
7361 if (val == ((a << 16) | a))
7362 return 0x100 | a;
7363 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7364 return 0x300 | a;
7365
7366 a = val & 0xff00;
7367 if (val == ((a << 16) | a))
7368 return 0x200 | (a >> 8);
7369
7370 return FAIL;
7371 }
7372 /* Encode a VFP SP or DP register number into inst.instruction. */
7373
7374 static void
7375 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7376 {
7377 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7378 && reg > 15)
7379 {
7380 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7381 {
7382 if (thumb_mode)
7383 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7384 fpu_vfp_ext_d32);
7385 else
7386 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7387 fpu_vfp_ext_d32);
7388 }
7389 else
7390 {
7391 first_error (_("D register out of range for selected VFP version"));
7392 return;
7393 }
7394 }
7395
7396 switch (pos)
7397 {
7398 case VFP_REG_Sd:
7399 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7400 break;
7401
7402 case VFP_REG_Sn:
7403 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7404 break;
7405
7406 case VFP_REG_Sm:
7407 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7408 break;
7409
7410 case VFP_REG_Dd:
7411 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7412 break;
7413
7414 case VFP_REG_Dn:
7415 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7416 break;
7417
7418 case VFP_REG_Dm:
7419 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7420 break;
7421
7422 default:
7423 abort ();
7424 }
7425 }
7426
7427 /* Encode a <shift> in an ARM-format instruction. The immediate,
7428 if any, is handled by md_apply_fix. */
7429 static void
7430 encode_arm_shift (int i)
7431 {
7432 /* register-shifted register. */
7433 if (inst.operands[i].immisreg)
7434 {
7435 int index;
7436 for (index = 0; index <= i; ++index)
7437 {
7438 /* Check the operand only when it's presented. In pre-UAL syntax,
7439 if the destination register is the same as the first operand, two
7440 register form of the instruction can be used. */
7441 if (inst.operands[index].present && inst.operands[index].isreg
7442 && inst.operands[index].reg == REG_PC)
7443 as_warn (UNPRED_REG ("r15"));
7444 }
7445
7446 if (inst.operands[i].imm == REG_PC)
7447 as_warn (UNPRED_REG ("r15"));
7448 }
7449
7450 if (inst.operands[i].shift_kind == SHIFT_RRX)
7451 inst.instruction |= SHIFT_ROR << 5;
7452 else
7453 {
7454 inst.instruction |= inst.operands[i].shift_kind << 5;
7455 if (inst.operands[i].immisreg)
7456 {
7457 inst.instruction |= SHIFT_BY_REG;
7458 inst.instruction |= inst.operands[i].imm << 8;
7459 }
7460 else
7461 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7462 }
7463 }
7464
7465 static void
7466 encode_arm_shifter_operand (int i)
7467 {
7468 if (inst.operands[i].isreg)
7469 {
7470 inst.instruction |= inst.operands[i].reg;
7471 encode_arm_shift (i);
7472 }
7473 else
7474 {
7475 inst.instruction |= INST_IMMEDIATE;
7476 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7477 inst.instruction |= inst.operands[i].imm;
7478 }
7479 }
7480
7481 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7482 static void
7483 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7484 {
7485 /* PR 14260:
7486 Generate an error if the operand is not a register. */
7487 constraint (!inst.operands[i].isreg,
7488 _("Instruction does not support =N addresses"));
7489
7490 inst.instruction |= inst.operands[i].reg << 16;
7491
7492 if (inst.operands[i].preind)
7493 {
7494 if (is_t)
7495 {
7496 inst.error = _("instruction does not accept preindexed addressing");
7497 return;
7498 }
7499 inst.instruction |= PRE_INDEX;
7500 if (inst.operands[i].writeback)
7501 inst.instruction |= WRITE_BACK;
7502
7503 }
7504 else if (inst.operands[i].postind)
7505 {
7506 gas_assert (inst.operands[i].writeback);
7507 if (is_t)
7508 inst.instruction |= WRITE_BACK;
7509 }
7510 else /* unindexed - only for coprocessor */
7511 {
7512 inst.error = _("instruction does not accept unindexed addressing");
7513 return;
7514 }
7515
7516 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7517 && (((inst.instruction & 0x000f0000) >> 16)
7518 == ((inst.instruction & 0x0000f000) >> 12)))
7519 as_warn ((inst.instruction & LOAD_BIT)
7520 ? _("destination register same as write-back base")
7521 : _("source register same as write-back base"));
7522 }
7523
7524 /* inst.operands[i] was set up by parse_address. Encode it into an
7525 ARM-format mode 2 load or store instruction. If is_t is true,
7526 reject forms that cannot be used with a T instruction (i.e. not
7527 post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Base register, P/U/W bits and write-back checks are shared with
     mode 3.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as the ROR shift type with no amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is filled in by the fixup.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7583
7584 /* inst.operands[i] was set up by parse_address. Encode it into an
7585 ARM-format mode 3 load or store instruction. Reject forms that
7586 cannot be used with such instructions. If is_t is true, reject
7587 forms that cannot be used with a T instruction (i.e. not
7588 post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* A scaled register offset has no encoding here; reject before
     touching the instruction word.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  /* Base register, P/U/W bits and write-back checks are shared with
     mode 2.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7627
7628 /* Write immediate bits [7:0] to the following locations:
7629
7630 |28/24|23 19|18 16|15 4|3 0|
7631 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7632
7633 This function is used by VMOV/VMVN/VORR/VBIC. */
7634
7635 static void
7636 neon_write_immbits (unsigned immbits)
7637 {
7638 inst.instruction |= immbits & 0xf;
7639 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7640 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7641 }
7642
7643 /* Invert low-order SIZE bits of XHI:XLO. */
7644
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Either pointer may be NULL, in which case that half is ignored.  */
  unsigned lo = xlo != NULL ? *xlo : 0;
  unsigned hi = xhi != NULL ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    case 64:
      /* Both 32-bit halves are inverted.  */
      lo = ~lo & 0xffffffff;
      hi = ~hi & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7679
7680 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7681 A, B, C, D. */
7682
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each of the four bytes must be either all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7691
7692 /* For immediate of above form, return 0bABCD. */
7693
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Collect bit 0 of each byte into the low nibble of the result.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7700
7701 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7702
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Take the sign bit down to bit 7, and bits 19-25 (low exponent bits
     plus top fraction bits) down to bits 0-6.  */
  unsigned sign = (imm >> 24) & 0x80;
  unsigned expfrac = (imm >> 19) & 0x7f;

  return sign | expfrac;
}
7708
7709 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7710 the instruction. *OP is passed as the initial value of the op field, and
7711 may be set to a different value depending on the constant (i.e.
7712 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7713 MVN). If the immediate looks like a repeated pattern then also
7714 try smaller element sizes. */
7715
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate (see is_quarter_float): only for
     32-bit float elements and the non-inverted (*op == 0) form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where every byte is 0x00 or 0xFF: squash each
	 byte to one bit, forcing OP to 1.  Not valid if the caller
	 already asked for the inverted form.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit immediate is only encodable if both 32-bit
	 halves are equal; fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One byte in any of the four byte positions of a 32-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte with ones filling the bits below it.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* If both 16-bit halves match, retry as a 16-bit element.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One byte in either half of a 16-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* If both bytes match, retry as an 8-bit element.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7818
7819 #if defined BFD_HOST_64_BIT
7820 /* Returns TRUE if double precision value V may be cast
7821 to single precision without loss of accuracy. */
7822
7823 static bfd_boolean
7824 is_double_a_single (bfd_int64_t v)
7825 {
7826 int exp = (int)((v >> 52) & 0x7FF);
7827 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7828
7829 return (exp == 0 || exp == 0x7FF
7830 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7831 && (mantissa & 0x1FFFFFFFl) == 0;
7832 }
7833
7834 /* Returns a double precision value casted to single precision
7835 (ignoring the least significant bits in exponent and mantissa). */
7836
7837 static int
7838 double_to_single (bfd_int64_t v)
7839 {
7840 int sign = (int) ((v >> 63) & 1l);
7841 int exp = (int) ((v >> 52) & 0x7FF);
7842 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7843
7844 if (exp == 0x7FF)
7845 exp = 0xFF;
7846 else
7847 {
7848 exp = exp - 1023 + 127;
7849 if (exp >= 0xFF)
7850 {
7851 /* Infinity. */
7852 exp = 0x7F;
7853 mantissa = 0;
7854 }
7855 else if (exp < 0)
7856 {
7857 /* No denormalized numbers. */
7858 exp = 0;
7859 mantissa = 0;
7860 }
7861 }
7862 mantissa >>= 29;
7863 return (sign << 31) | (exp << 23) | mantissa;
7864 }
7865 #endif /* BFD_HOST_64_BIT */
7866
/* The kind of instruction issuing an "=constant" literal load; selects
   which move-immediate encodings move_or_literal_pool may substitute
   for the literal-pool load.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load.  */
  CONST_ARM,	/* ARM load.  */
  CONST_VEC	/* VFP/Neon load.  */
};
7873
7874 static void do_vfp_nsyn_opcode (const char *);
7875
7876 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7877 Determine whether it can be performed with a move instruction; if
7878 it can, convert inst.instruction to that move instruction and
7879 return TRUE; if it can't, convert inst.instruction to a literal-pool
7880 load and return FALSE. If this is not a valid thing to do in the
7881 current context, set inst.error and return TRUE.
7882
7883 inst.operands[i] describes the destination register. */
7884
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* The pseudo-op is only meaningful on load instructions; pick the
     load bit for the relevant instruction set.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Bignum: gather the value from the littlenum array (or from
	     a converted flonum when X_add_number is -1).  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise complement for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W. */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the T32 modified immediate into the
			 i:imm3:imm8 fields.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000; /* MOVW. */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try the Neon move-immediate forms; on failure, retry
		 with the inverted value and flipped OP bit.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High half: explicit register-as-immediate, zero for an
		 unsigned expression, else sign-extend the low half.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move-immediate form fits: fall back to a PC-relative load from
     the literal pool (4 bytes, or 8 for a double-word vector load).  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite operand 1 as a PC-relative, pre-indexed address; the
     offset is resolved later through the chosen relocation.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8124
8125 /* inst.operands[i] was set up by parse_address. Encode it into an
8126 ARM-format instruction. Reject all forms which cannot be encoded
8127 into a coprocessor load/store instruction. If wb_ok is false,
8128 reject use of writeback; if unind_ok is false, reject use of
8129 unindexed addressing. If reloc_override is not 0, use it instead
8130 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8131 (in which case it is preserved). */
8132
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: operand is a literal (=constant), not an address.
	 Only a vector (VLDR-style) destination may take one; try to
	 satisfy it via move_or_literal_pool.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* In the unindexed form the 8-bit field holds an option value,
	 not an offset.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; everything else gets the default CP offset reloc.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8201
8202 /* Functions for instruction encoding, sorted by sub-architecture.
8203 First some generics; their names are taken from the conventional
8204 bit positions for register arguments in ARM format instructions. */
8205
/* Encoder for instructions that take no operands: the opcode from the
   insns[] table is already complete.  */
static void
do_noargs (void)
{
}
8210
8211 static void
8212 do_rd (void)
8213 {
8214 inst.instruction |= inst.operands[0].reg << 12;
8215 }
8216
8217 static void
8218 do_rn (void)
8219 {
8220 inst.instruction |= inst.operands[0].reg << 16;
8221 }
8222
8223 static void
8224 do_rd_rm (void)
8225 {
8226 inst.instruction |= inst.operands[0].reg << 12;
8227 inst.instruction |= inst.operands[1].reg;
8228 }
8229
8230 static void
8231 do_rm_rn (void)
8232 {
8233 inst.instruction |= inst.operands[0].reg;
8234 inst.instruction |= inst.operands[1].reg << 16;
8235 }
8236
8237 static void
8238 do_rd_rn (void)
8239 {
8240 inst.instruction |= inst.operands[0].reg << 12;
8241 inst.instruction |= inst.operands[1].reg << 16;
8242 }
8243
8244 static void
8245 do_rn_rd (void)
8246 {
8247 inst.instruction |= inst.operands[0].reg << 16;
8248 inst.instruction |= inst.operands[1].reg << 12;
8249 }
8250
8251 static void
8252 do_tt (void)
8253 {
8254 inst.instruction |= inst.operands[0].reg << 8;
8255 inst.instruction |= inst.operands[1].reg << 16;
8256 }
8257
8258 static bfd_boolean
8259 check_obsolete (const arm_feature_set *feature, const char *msg)
8260 {
8261 if (ARM_CPU_IS_ANY (cpu_variant))
8262 {
8263 as_tsktsk ("%s", msg);
8264 return TRUE;
8265 }
8266 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8267 {
8268 as_bad ("%s", msg);
8269 return TRUE;
8270 }
8271
8272 return FALSE;
8273 }
8274
/* Three register operands: Rd (12-15), Rm (0-3), Rn (16-19).  Used by
   SWP/SWPB among others, which get extra diagnostics.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
	 check_obsolete already warned/errored if it returned TRUE, so the
	 deprecation message is only emitted otherwise.  */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8298
8299 static void
8300 do_rd_rn_rm (void)
8301 {
8302 inst.instruction |= inst.operands[0].reg << 12;
8303 inst.instruction |= inst.operands[1].reg << 16;
8304 inst.instruction |= inst.operands[2].reg;
8305 }
8306
/* Three register operands: Rm (0-3), Rd (12-15), Rn (16-19), where the
   third operand is a base address that must be a plain [Rn].  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any offset expression must have parsed to the constant zero.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8319
8320 static void
8321 do_imm0 (void)
8322 {
8323 inst.instruction |= inst.operands[0].imm;
8324 }
8325
8326 static void
8327 do_rd_cpaddr (void)
8328 {
8329 inst.instruction |= inst.operands[0].reg << 12;
8330 encode_arm_cp_address (1, TRUE, TRUE, 0);
8331 }
8332
8333 /* ARM instructions, in alphabetical order by function name (except
8334 that wrapper functions appear immediately after the function they
8335 wrap). */
8336
8337 /* This is a pseudo-op of the form "adr rd, label" to be converted
8338 into a relative address of the form "add rd, pc, #label-.-8". */
8339
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the PC read-ahead in ARM state (pc reads as . + 8,
     per the "#label-.-8" form documented above).  */
  inst.reloc.exp.X_add_number -= 8;
}
8351
8352 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8353 into a relative address of the form:
8354 add rd, pc, #low(label-.-8)"
8355 add rd, rd, #high(label-.-8)" */
8356
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The pseudo-op expands to two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC read-ahead in ARM state (pc reads as . + 8).  */
  inst.reloc.exp.X_add_number -= 8;
}
8369
/* Data-processing arithmetic: Rd, {Rn,} <shifter_operand>.  */
static void
do_arit (void)
{
  /* Thumb-1-only group relocations may not appear here.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: Rd doubles as Rn.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8382
8383 static void
8384 do_barrier (void)
8385 {
8386 if (inst.operands[0].present)
8387 inst.instruction |= inst.operands[0].imm;
8388 else
8389 inst.instruction |= 0xf;
8390 }
8391
8392 static void
8393 do_bfc (void)
8394 {
8395 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8396 constraint (msb > 32, _("bit-field extends past end of register"));
8397 /* The instruction encoding stores the LSB and MSB,
8398 not the LSB and width. */
8399 inst.instruction |= inst.operands[0].reg << 12;
8400 inst.instruction |= inst.operands[1].imm << 7;
8401 inst.instruction |= (msb - 1) << 16;
8402 }
8403
/* BFI Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8423
8424 static void
8425 do_bfx (void)
8426 {
8427 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8428 _("bit-field extends past end of register"));
8429 inst.instruction |= inst.operands[0].reg << 12;
8430 inst.instruction |= inst.operands[1].reg;
8431 inst.instruction |= inst.operands[2].imm << 7;
8432 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8433 }
8434
8435 /* ARM V5 breakpoint instruction (argument parse)
8436 BKPT <16 bit unsigned immediate>
8437 Instruction is not conditional.
8438 The bit pattern given in insns[] has the COND_ALWAYS condition,
8439 and it is an error if the caller tried to override that. */
8440
8441 static void
8442 do_bkpt (void)
8443 {
8444 /* Top 12 of 16 bits to bits 19:8. */
8445 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8446
8447 /* Bottom 4 of 16 bits to bits 3:0. */
8448 inst.instruction |= inst.operands[0].imm & 0xf;
8449 }
8450
/* Select the PC-relative relocation for a branch.  A '(plt)' or
   '(tlscall)' suffix on the operand overrides DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8467
/* B{cond} <target>.  EABI v4+ objects use the JUMP relocation,
   older objects the plain BRANCH relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8478
/* BL{cond} <target>.  For EABI v4+ an unconditional BL uses the CALL
   relocation and a conditional one the JUMP relocation; older objects
   use the plain BRANCH relocation.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8494
8495 /* ARM V5 branch-link-exchange instruction (argument parse)
8496 BLX <target_addr> ie BLX(1)
8497 BLX{<condition>} <Rm> ie BLX(2)
8498 Unfortunately, there are two different opcodes for this mnemonic.
8499 So, the insns[].value is not used, and the code here zaps values
8500 into inst.instruction.
8501 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8502
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Immediate form: switch to the BLX(1) opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8526
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Only EABI v4+ objects get the marker relocation at all.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8550
8551
8552 /* ARM v5TEJ. Jump to Jazelle code. */
8553
8554 static void
8555 do_bxj (void)
8556 {
8557 if (inst.operands[0].reg == REG_PC)
8558 as_tsktsk (_("use of r15 in bxj is not really useful"));
8559
8560 inst.instruction |= inst.operands[0].reg;
8561 }
8562
8563 /* Co-processor data operation:
8564 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8565 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8566 static void
8567 do_cdp (void)
8568 {
8569 inst.instruction |= inst.operands[0].reg << 8;
8570 inst.instruction |= inst.operands[1].imm << 20;
8571 inst.instruction |= inst.operands[2].reg << 12;
8572 inst.instruction |= inst.operands[3].reg << 16;
8573 inst.instruction |= inst.operands[4].reg;
8574 inst.instruction |= inst.operands[5].imm << 5;
8575 }
8576
8577 static void
8578 do_cmp (void)
8579 {
8580 inst.instruction |= inst.operands[0].reg << 16;
8581 encode_arm_shifter_operand (1);
8582 }
8583
8584 /* Transfer between coprocessor and ARM registers.
8585 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8586 MRC2
8587 MCR{cond}
8588 MCR2
8589
8590 No special properties. */
8591
/* Identifies one coprocessor register (by its full MRC/MCR coordinates)
   whose access is deprecated and/or obsoleted for some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* Opcode 1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Opcode 2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsolescence diagnostic.  */
};
8604
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Each entry gives the
   register's cp/opc1/CRn/CRm/opc2 coordinates, the feature sets for
   which the access is deprecated/obsoleted, and the diagnostics.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8632
/* MRC/MRC2/MCR/MCR2: transfer between a coprocessor register and an
   ARM core register, with per-mode restrictions on Rd and warnings for
   deprecated coprocessor registers.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecated-register
     table (suppressed when assembling for "any" CPU).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8682
8683 /* Transfer between coprocessor register and pair of ARM registers.
8684 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8685 MCRR2
8686 MRRC{cond}
8687 MRRC2
8688
8689 Two XScale instructions are special cases of these:
8690
8691 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8692 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8693
8694 Result unpredictable if Rd or Rn is R15. */
8695
/* MCRR/MCRR2/MRRC/MRRC2: transfer between a coprocessor register and a
   pair of ARM core registers.  See the comment block above for the
   XScale MAR/MRA aliases.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8729
8730 static void
8731 do_cpsi (void)
8732 {
8733 inst.instruction |= inst.operands[0].imm << 6;
8734 if (inst.operands[1].present)
8735 {
8736 inst.instruction |= CPSI_MMOD;
8737 inst.instruction |= inst.operands[1].imm;
8738 }
8739 }
8740
8741 static void
8742 do_dbg (void)
8743 {
8744 inst.instruction |= inst.operands[0].imm;
8745 }
8746
8747 static void
8748 do_div (void)
8749 {
8750 unsigned Rd, Rn, Rm;
8751
8752 Rd = inst.operands[0].reg;
8753 Rn = (inst.operands[1].present
8754 ? inst.operands[1].reg : Rd);
8755 Rm = inst.operands[2].reg;
8756
8757 constraint ((Rd == REG_PC), BAD_PC);
8758 constraint ((Rn == REG_PC), BAD_PC);
8759 constraint ((Rm == REG_PC), BAD_PC);
8760
8761 inst.instruction |= Rd << 16;
8762 inst.instruction |= Rn << 0;
8763 inst.instruction |= Rm << 8;
8764 }
8765
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the then/else mask and condition for the validation of
	 the following instructions.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8782
8783 /* If there is only one register in the register list,
8784 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns 0 for an empty mask, making I negative; guard that
     case explicitly, since "1 << -1" is undefined behavior.  */
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8791
/* Shared worker for LDM/STM and PUSH/POP.  Encodes base register,
   register list and writeback, warns about UNPREDICTABLE writeback
   combinations, and (for PUSH/POP with a single register) switches to
   the A2 single-register encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     variants (LDM type 2/3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8842
/* LDM/STM proper: share the worker with PUSH/POP but never use the
   single-register A2 encodings.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8848
8849 /* ARMv5TE load-consecutive (argument parse)
8850 Mode is like LDRH.
8851
8852 LDRccD R, mode
8853 STRccD R, mode. */
8854
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14:r15, i.e. include the PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register is optional in the source; default it.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8890
/* LDREX Rd, [Rn]: the address must be a plain [Rn] with no offset,
   writeback, shift or index.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): this PC check duplicates the one folded into the big
     constraint above; it is harmless but redundant.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8922
/* LDREXD Rt, {Rt2,} [Rn]: Rt must be even and Rt2 (when given) must be
   Rt + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  /* operands[2] is the base address register.  */
  inst.instruction |= inst.operands[2].reg << 16;
}
8938
8939 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8940 which is not a multiple of four is UNPREDICTABLE. */
8941 static void
8942 check_ldr_r15_aligned (void)
8943 {
8944 constraint (!(inst.operands[1].immisreg)
8945 && (inst.operands[0].reg == REG_PC
8946 && inst.operands[1].reg == REG_PC
8947 && (inst.reloc.exp.X_add_number & 0x3)),
8948 _("ldr to register 15 must be 4-byte alligned"));
8949 }
8950
/* Word/byte load-store (addressing mode 2).  A non-register second
   operand is an '=constant' pseudo-load handled by
   move_or_literal_pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8961
/* LDRT/STRT (user-mode, translated access), addressing mode 2.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8980
8981 /* Halfword and signed-byte load/store operations. */
8982
/* Halfword/signed-byte load-store (addressing mode 3).  A non-register
   second operand is an '=constant' pseudo-load.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8993
/* LDRHT/STRHT etc. (user-mode, translated access), addressing mode 3.
   Mirrors do_ldstt for mode-2 instructions.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9012
9013 /* Co-processor register load/store.
9014 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9015 static void
9016 do_lstc (void)
9017 {
9018 inst.instruction |= inst.operands[0].reg << 8;
9019 inst.instruction |= inst.operands[1].reg << 12;
9020 encode_arm_cp_address (2, TRUE, TRUE, 0);
9021 }
9022
/* MLA/MLS: Rd (16-19), Rm (0-3), Rs (8-11), Rn (12-15).  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9037
/* MOV/MVN Rd, <shifter_operand>.  */
static void
do_mov (void)
{
  /* Thumb-1-only group relocations may not appear here.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9047
9048 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9049 static void
9050 do_mov16 (void)
9051 {
9052 bfd_vma imm;
9053 bfd_boolean top;
9054
9055 top = (inst.instruction & 0x00400000) != 0;
9056 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9057 _(":lower16: not allowed this instruction"));
9058 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9059 _(":upper16: not allowed instruction"));
9060 inst.instruction |= inst.operands[0].reg << 12;
9061 if (inst.reloc.type == BFD_RELOC_UNUSED)
9062 {
9063 imm = inst.reloc.exp.X_add_number;
9064 /* The value is in two pieces: 0:11, 16:19. */
9065 inst.instruction |= (imm & 0x00000fff);
9066 inst.instruction |= (imm & 0x0000f000) << 4;
9067 }
9068 }
9069
/* Handle the VFP forms of MRS via the non-unified-syntax opcodes:
   APSR destination becomes FMSTAT, vector source becomes FMRX.
   Returns FAIL when neither form applies.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-dispatch.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9088
9089 static int
9090 do_vfp_nsyn_msr (void)
9091 {
9092 if (inst.operands[0].isvec)
9093 do_vfp_nsyn_opcode ("fmxr");
9094 else
9095 return FAIL;
9096
9097 return SUCCESS;
9098 }
9099
/* VMRS Rt, <spec_reg>.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9123
/* Encode VMSR <spec_reg>, <Rt>.  Thumb mode applies the stricter
   reject_bad_reg check; ARM mode only forbids PC.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9142
/* Encode MRS.  First gives the VFP-syntax forms (FMSTAT/FMRX) a
   chance; otherwise encodes either a banked-register source or one of
   APSR/CPSR/SPSR.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) selects bits 16-19 but is compared
	 against 0xf000 (bits 12-15); the masks are disjoint, so the
	 second clause is always true and the test reduces to bit 9
	 alone -- confirm the intended mask/value.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9171
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* Prefer the VFP-syntax form (FMXR) when it applies.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The field mask parsed into operand 0 goes straight into the opcode.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to the fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9192
/* Encode MUL{S} Rd, Rm {, Rs}.  When Rs is omitted it defaults to Rd
   (two-operand form).  Before ARMv6, Rd == Rm gives unpredictable
   results, hence the warning.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9208
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9233
/* Encode NOP {#imm}.  On v6K and later (or when a hint operand is
   given), NOP is re-encoded as the architectural hint form of MSR;
   otherwise the pre-selected MOV encoding is left untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* keep only the condition field */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9247
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9262
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped rm/rn positions.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9285
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.
   Only pre-indexed, non-writeback addressing is architecturally valid.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9306
/* ARMv7: PLI <addr_mode>
   Same addressing-mode constraints as PLD, but the PLI encoding has
   the P bit clear, so PRE_INDEX is stripped after encoding.  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9322
/* PUSH/POP {reglist}: rewrite the operands into the equivalent
   LDM/STM SP!, {reglist} form, then reuse the LDM/STM encoder.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9335
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9349
/* ARM V6 ssat (argument parse).
   SSAT encodes the saturate position as sat_imm = position - 1.  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).
   Unlike SSAT, USAT encodes the saturate position directly.  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9375
/* ARM V6 ssat16 (argument parse).
   As with SSAT, the saturate position is encoded minus one.  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse); position encoded directly.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9393
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  Operand 0's imm is nonzero for BE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9410
/* Encode the shift pseudo-instructions (LSL/LSR/ASR/ROR as MOV with
   shifted operand).  Two-operand form "Rd, Rs" implies Rm = Rd.
   Register shift amounts set SHIFT_BY_REG; immediate amounts are left
   to the BFD_RELOC_ARM_SHIFT_IMM fixup.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9431
/* SMC (Secure Monitor Call): the immediate is handled by its fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* HVC (Hypervisor Call): likewise deferred to the fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

/* SWI/SVC: likewise deferred to the fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9452
/* ARMv8.1 SETPAN #imm1, ARM encoding: imm goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* ARMv8.1 SETPAN #imm1, Thumb encoding: imm goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9470
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9484
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9501
/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9513
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  The base register, when given,
   must be SP; it defaults to SP when omitted.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* the mode number */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9535
/* ARM V6 strex (argument parse).
   STREX Rd, Rt, [Rn]: only a plain register base with zero offset is
   a valid address, and Rd must not overlap Rt or Rn.  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;	/* address fully encoded; no fixup */
}
9561
/* Thumb STREXB/STREXH: same addressing and overlap constraints as
   do_strex, then the common Rm/Rd/Rn field placement.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9576
/* STREXD Rd, Rt, Rt2, [Rn]: Rt must be even and Rt2 = Rt + 1 when
   given explicitly; Rd must not overlap the source pair or Rn.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9598
/* ARM V8 STRL.  */
/* ARM encoding of STLEX-family: status register must not overlap the
   value or base registers.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb encoding of the same; field placement differs.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9617
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* Rotation is already parsed as units of 8 bits >> 3; field is bits 10-11.  */
  inst.instruction |= inst.operands[3].imm << 10;
}
9634
/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
9648 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* One-operand SP op: <op> Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Two-operand SP op: <op> Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Convert SP source to DP destination: <op> Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Convert DP source to SP destination: <op> Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* FMRS: core register from SP register.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* FMRRS: two core registers from an SP register pair.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* FMSR: SP register from core register.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* FMSRR: SP register pair from two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: <op> Sd, <addr>.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: <op> Dd, <addr>.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9734
9735
/* Common encoder for the single-precision load/store-multiple forms.
   Only the IA-without-writeback addressing mode may omit '!'.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* register count */
}
9748
/* Common encoder for the double-precision load/store-multiple forms.
   The offset field counts words (2 per D register); the FLDMX/FSTMX
   variants add one extra word per the architecture.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9769
/* Thin wrappers selecting the addressing mode for the two common
   encoders above.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X variants transfer an extra word (FLDMX/FSTMX).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9805
/* Double-precision register-form encoders; each name spells the field
   order: d = Dd (dest), n = Dn, m = Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9848
/* VFPv3 instructions.  */
/* VMOV.F32 Sd, #imm: the 8-bit modified-immediate is split into the
   two 4-bit fields at bits 16-19 and 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV.F64 Dd, #imm: same immediate split as the SP form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9865
/* Encode the fraction-bits field of the VFPv3 fixed-point conversion
   instructions.  The field holds srcsize - fbits, split as bit 5
   (low bit) plus bits 0-3 (high bits).  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9889
/* Fixed-point conversion wrappers: encode the destination register,
   then defer the fraction-bits field to vfp_conv.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9917 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: Fn in bits 16-18, Fm in bits 0-2.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9926
/* FPA LFM/SFM: the register count (1-4) is encoded in the CP_T_X/Y
   bits; 4 is represented as 0.  The stack-style "ea"/"fd" modes are
   emulated by synthesising a 12-bytes-per-register offset, since the
   hardware has no real stacking support.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		break;
    case 2: inst.instruction |= CP_T_Y;		break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 					break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9965 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC/TEXTRC: destination must be r15 (flags).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC wRd, #imm.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
9980
/* TEXTRM Rd, wRn, #imm: extract element to core register.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR wRd, Rn, #imm: insert element from core register.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9996
/* TMIA wRd, Rm, Rs: multiply-accumulate into a wR register.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* WALIGNI wRd, wRn, wRm, #imm3: alignment in bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
10013
/* WMERGE wRd, wRn, wRm, #imm3: merge offset in bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10031
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword loads use a scaled offset
   relocation, chosen per ARM vs Thumb encoding.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
10043
/* WLDRW/WSTRW: word load/store; the control-register form is
   unconditional (0xF condition).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10057
/* WLDRD/WSTRD: doubleword load/store.  iWMMXt2 adds a register-offset
   addressing form which is encoded by hand here; otherwise the normal
   coprocessor address encoding applies.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;	/* clear P/W/U and offset fields */
      inst.instruction |= (0xfU << 28);	/* unconditional encoding */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10080
/* WSHUFH wRd, wRn, #imm8: the 8-bit selector is split into high and
   low nibbles at bits 20-23 and 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10098
/* iWMMXt shift instructions taking either a register or (iWMMXt2 only)
   a 5-bit immediate shift count.  A count of zero is rewritten into an
   equivalent rotate (or WOR for the doubleword forms), since the
   encodings cannot express a zero shift directly.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation size/kind.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10148 \f
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
10173
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register is a
   variable field; DSPSC is implicit in the base opcode.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10200 \f
/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.
   acc0 is implicit in the base opcode.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.
   RdLo and RdHi must differ (unpredictable otherwise).  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
10237 \f
10238 /* Encoding functions relevant only to Thumb. */
10239
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.
   Thumb32 cannot shift by register; the immediate amount is split
   into the imm3 (bits 12-14) and imm2 (bits 6-7) fields, with LSL #0
   representing "no shift" and a 32-bit LSR/ASR encoded as amount 0.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;	/* RRX is ROR with amount 0 */
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10274
10275
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm{, LSL #shift}] -- register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* The LSL amount occupies a 2-bit field.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* Register offsets need no fixup.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn{, #offset}]{!} -- immediate-offset / pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      /* The D-form and the single-register form place their P/W-style
	 control bits in different positions.  */
      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #offset -- post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10354
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry names the mnemonic and gives its 16-bit opcode and
   its 32-bit opcode (ffffffff marks a missing 32-bit encoding).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: generate the T_MNEM_* enumerators.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode table, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode table.  THUMB_SETS_FLAGS tests
   the S bit (bit 20) of the 32-bit encoding.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10460
10461 /* Thumb instruction encoders, in alphabetical order. */
10462
10463 /* ADDW or SUBW. */
10464
10465 static void
10466 do_t_add_sub_w (void)
10467 {
10468 int Rd, Rn;
10469
10470 Rd = inst.operands[0].reg;
10471 Rn = inst.operands[1].reg;
10472
10473 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10474 is the SP-{plus,minus}-immediate form of the instruction. */
10475 if (Rn == REG_SP)
10476 constraint (Rd == REG_PC, BAD_PC);
10477 else
10478 reject_bad_reg (Rd);
10479
10480 inst.instruction |= (Rn << 16) | (Rd << 8);
10481 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10482 }
10483
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* The S-suffixed mnemonics set the flags.  A narrow (16-bit)
	 encoding sets flags only outside an IT block, so prefer narrow
	 exactly when that matches the requested behaviour.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  SP/PC-relative forms have dedicated
		 pseudo-opcodes in T16_32_TAB.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 ALU_ABS group relocs pick the encoding
		     themselves; otherwise either fix up now (.n) or
		     leave the choice to relaxation.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #const
		     may target PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  /* Convert the register-form T32 opcode into the
		     modified-immediate form (e.g. 0xeb.. -> 0xf1..).  */
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source ends
			 up in Rn for the ADD (high registers) form.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Divided (non-unified) syntax: only Thumb-1 forms exist, and
	 the S-suffixed mnemonics are not accepted.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10702
10703 static void
10704 do_t_adr (void)
10705 {
10706 unsigned Rd;
10707
10708 Rd = inst.operands[0].reg;
10709 reject_bad_reg (Rd);
10710
10711 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10712 {
10713 /* Defer to section relaxation. */
10714 inst.relax = inst.instruction;
10715 inst.instruction = THUMB_OP16 (inst.instruction);
10716 inst.instruction |= Rd << 4;
10717 }
10718 else if (unified_syntax && inst.size_req != 2)
10719 {
10720 /* Generate a 32-bit opcode. */
10721 inst.instruction = THUMB_OP32 (inst.instruction);
10722 inst.instruction |= Rd << 8;
10723 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10724 inst.reloc.pc_rel = 1;
10725 }
10726 else
10727 {
10728 /* Generate a 16-bit opcode. */
10729 inst.instruction = THUMB_OP16 (inst.instruction);
10730 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10731 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10732 inst.reloc.pc_rel = 1;
10733
10734 inst.instruction |= Rd << 4;
10735 }
10736 }
10737
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.
	     The masking converts the register-form T32 opcode into
	     the modified-immediate form (e.g. 0xeb.. -> 0xf1..).  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction: the narrow
	     form sets flags iff outside an IT block, needs all-low
	     registers, no shift, and no explicit .w request.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10826
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.
	     The masking converts the register-form T32 opcode into
	     the modified-immediate form (e.g. 0xeb.. -> 0xf1..).  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: the destination may match either source
		 in the two-operand 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10927
10928 static void
10929 do_t_bfc (void)
10930 {
10931 unsigned Rd;
10932 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10933 constraint (msb > 32, _("bit-field extends past end of register"));
10934 /* The instruction encoding stores the LSB and MSB,
10935 not the LSB and width. */
10936 Rd = inst.operands[0].reg;
10937 reject_bad_reg (Rd);
10938 inst.instruction |= Rd << 8;
10939 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10940 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10941 inst.instruction |= msb - 1;
10942 }
10943
10944 static void
10945 do_t_bfi (void)
10946 {
10947 int Rd, Rn;
10948 unsigned int msb;
10949
10950 Rd = inst.operands[0].reg;
10951 reject_bad_reg (Rd);
10952
10953 /* #0 in second position is alternative syntax for bfc, which is
10954 the same instruction but with REG_PC in the Rm field. */
10955 if (!inst.operands[1].isreg)
10956 Rn = REG_PC;
10957 else
10958 {
10959 Rn = inst.operands[1].reg;
10960 reject_bad_reg (Rn);
10961 }
10962
10963 msb = inst.operands[2].imm + inst.operands[3].imm;
10964 constraint (msb > 32, _("bit-field extends past end of register"));
10965 /* The instruction encoding stores the LSB and MSB,
10966 not the LSB and width. */
10967 inst.instruction |= Rd << 8;
10968 inst.instruction |= Rn << 16;
10969 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10970 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10971 inst.instruction |= msb - 1;
10972 }
10973
10974 static void
10975 do_t_bfx (void)
10976 {
10977 unsigned Rd, Rn;
10978
10979 Rd = inst.operands[0].reg;
10980 Rn = inst.operands[1].reg;
10981
10982 reject_bad_reg (Rd);
10983 reject_bad_reg (Rn);
10984
10985 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10986 _("bit-field extends past end of register"));
10987 inst.instruction |= Rd << 8;
10988 inst.instruction |= Rn << 16;
10989 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10990 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10991 inst.instruction |= inst.operands[3].imm - 1;
10992 }
10993
10994 /* ARM V5 Thumb BLX (argument parse)
10995 BLX <target_addr> which is BLX(1)
10996 BLX <Rm> which is BLX(2)
10997 Unfortunately, there are two different opcodes for this mnemonic.
10998 So, the insns[].value is not used, and the code here zaps values
10999 into inst.instruction.
11000
11001 ??? How to take advantage of the additional two bits of displacement
11002 available in Thumb32 mode? Need new relocation? */
11003
11004 static void
11005 do_t_blx (void)
11006 {
11007 set_it_insn_type_last ();
11008
11009 if (inst.operands[0].isreg)
11010 {
11011 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11012 /* We have a register, so this is BLX(2). */
11013 inst.instruction |= inst.operands[0].reg << 3;
11014 }
11015 else
11016 {
11017 /* No register. This must be BLX(1). */
11018 inst.instruction = 0xf000e800;
11019 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11020 }
11021 }
11022
11023 static void
11024 do_t_branch (void)
11025 {
11026 int opcode;
11027 int cond;
11028 bfd_reloc_code_real_type reloc;
11029
11030 cond = inst.cond;
11031 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11032
11033 if (in_it_block ())
11034 {
11035 /* Conditional branches inside IT blocks are encoded as unconditional
11036 branches. */
11037 cond = COND_ALWAYS;
11038 }
11039 else
11040 cond = inst.cond;
11041
11042 if (cond != COND_ALWAYS)
11043 opcode = T_MNEM_bcond;
11044 else
11045 opcode = inst.instruction;
11046
11047 if (unified_syntax
11048 && (inst.size_req == 4
11049 || (inst.size_req != 2
11050 && (inst.operands[0].hasreloc
11051 || inst.reloc.exp.X_op == O_constant))))
11052 {
11053 inst.instruction = THUMB_OP32(opcode);
11054 if (cond == COND_ALWAYS)
11055 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11056 else
11057 {
11058 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11059 _("selected architecture does not support "
11060 "wide conditional branch instruction"));
11061
11062 gas_assert (cond != 0xF);
11063 inst.instruction |= cond << 22;
11064 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11065 }
11066 }
11067 else
11068 {
11069 inst.instruction = THUMB_OP16(opcode);
11070 if (cond == COND_ALWAYS)
11071 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11072 else
11073 {
11074 inst.instruction |= cond << 8;
11075 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11076 }
11077 /* Allow section relaxation. */
11078 if (unified_syntax && inst.size_req != 2)
11079 inst.relax = opcode;
11080 }
11081 inst.reloc.type = reloc;
11082 inst.reloc.pc_rel = 1;
11083 }
11084
/* Actually do the work for Thumb state bkpt and hlt.  The only difference
   between the two is the maximum immediate allowed - which is passed in
   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These mnemonics take no condition suffix.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  /* The immediate operand is optional.  */
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  /* Neutral with respect to IT-block tracking.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
11102
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11108
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11114
/* Encode a Thumb branch that uses the 23-bit PC-relative BRANCH23
   relocation (BL-style calls).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11142
/* Thumb BX: the target register goes in bits 3-6.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
     should cause the alignment to be checked once it is known.	 This is
     because BX PC only works if the instruction is word aligned.  */
}
11152
11153 static void
11154 do_t_bxj (void)
11155 {
11156 int Rm;
11157
11158 set_it_insn_type_last ();
11159 Rm = inst.operands[0].reg;
11160 reject_bad_reg (Rm);
11161 inst.instruction |= Rm << 16;
11162 }
11163
11164 static void
11165 do_t_clz (void)
11166 {
11167 unsigned Rd;
11168 unsigned Rm;
11169
11170 Rd = inst.operands[0].reg;
11171 Rm = inst.operands[1].reg;
11172
11173 reject_bad_reg (Rd);
11174 reject_bad_reg (Rm);
11175
11176 inst.instruction |= Rd << 8;
11177 inst.instruction |= Rm << 16;
11178 inst.instruction |= Rm;
11179 }
11180
/* Thumb CPS (16-bit form): OR the operand immediate into the opcode.  */
static void
do_t_cps (void)
{
  /* Not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11187
/* Thumb CPSIE/CPSID: choose between the 16-bit and 32-bit encodings.  */
static void
do_t_cpsi (void)
{
  /* Not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: carry the imod field over from the 16-bit opcode,
	 rebuild on the Thumb32 base pattern, and append the optional
	 mode operand (with its M bit, 0x100).  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11215
11216 /* THUMB CPY instruction (argument parse). */
11217
11218 static void
11219 do_t_cpy (void)
11220 {
11221 if (inst.size_req == 4)
11222 {
11223 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11224 inst.instruction |= inst.operands[0].reg << 8;
11225 inst.instruction |= inst.operands[1].reg;
11226 }
11227 else
11228 {
11229 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11230 inst.instruction |= (inst.operands[0].reg & 0x7);
11231 inst.instruction |= inst.operands[1].reg << 3;
11232 }
11233 }
11234
11235 static void
11236 do_t_cbz (void)
11237 {
11238 set_it_insn_type (OUTSIDE_IT_INSN);
11239 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11240 inst.instruction |= inst.operands[0].reg;
11241 inst.reloc.pc_rel = 1;
11242 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11243 }
11244
/* Thumb DBG hint: OR the option immediate into the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11250
11251 static void
11252 do_t_div (void)
11253 {
11254 unsigned Rd, Rn, Rm;
11255
11256 Rd = inst.operands[0].reg;
11257 Rn = (inst.operands[1].present
11258 ? inst.operands[1].reg : Rd);
11259 Rm = inst.operands[2].reg;
11260
11261 reject_bad_reg (Rd);
11262 reject_bad_reg (Rn);
11263 reject_bad_reg (Rm);
11264
11265 inst.instruction |= Rd << 8;
11266 inst.instruction |= Rn << 16;
11267 inst.instruction |= Rm;
11268 }
11269
11270 static void
11271 do_t_hint (void)
11272 {
11273 if (unified_syntax && inst.size_req == 4)
11274 inst.instruction = THUMB_OP32 (inst.instruction);
11275 else
11276 inst.instruction = THUMB_OP16 (inst.instruction);
11277 }
11278
11279 static void
11280 do_t_it (void)
11281 {
11282 unsigned int cond = inst.operands[0].imm;
11283
11284 set_it_insn_type (IT_INSN);
11285 now_it.mask = (inst.instruction & 0xf) | 0x10;
11286 now_it.cc = cond;
11287 now_it.warn_deprecated = FALSE;
11288
11289 /* If the condition is a negative condition, invert the mask. */
11290 if ((cond & 0x1) == 0x0)
11291 {
11292 unsigned int mask = inst.instruction & 0x000f;
11293
11294 if ((mask & 0x7) == 0)
11295 {
11296 /* No conversion needed. */
11297 now_it.block_length = 1;
11298 }
11299 else if ((mask & 0x3) == 0)
11300 {
11301 mask ^= 0x8;
11302 now_it.block_length = 2;
11303 }
11304 else if ((mask & 0x1) == 0)
11305 {
11306 mask ^= 0xC;
11307 now_it.block_length = 3;
11308 }
11309 else
11310 {
11311 mask ^= 0xE;
11312 now_it.block_length = 4;
11313 }
11314
11315 inst.instruction &= 0xfff0;
11316 inst.instruction |= mask;
11317 }
11318
11319 inst.instruction |= cond << 4;
11320 }
11321
/* Helper function used for both push/pop and ldm/stm.  BASE is the
   base register number, MASK the register-list bit mask, WRITEBACK
   whether the base register is updated.  Emits the 32-bit Thumb-2
   encoding, degrading a single-register transfer to LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the opcode template distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC branches: must be the last insn in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* Exactly one bit set in MASK: a single-register transfer.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The single listed register becomes the Rt field (bits 15:12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11385
/* Thumb LDM/STM (argument parse).  In unified syntax, prefer the
   16-bit encodings (including the PUSH/POP and single-register
   LDR/STR degenerate forms) when the operands permit; otherwise emit
   the 32-bit Thumb-2 form via encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* STMIA needs writeback; LDMIA needs writeback iff the
		 base is absent from the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based LDMIA!/STMIA! are the canonical POP/PUSH.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* Single register: use the SP-relative LDR/STR form.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Divided (pre-UAL) syntax: 16-bit LDMIA/STMIA only.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11513
11514 static void
11515 do_t_ldrex (void)
11516 {
11517 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11518 || inst.operands[1].postind || inst.operands[1].writeback
11519 || inst.operands[1].immisreg || inst.operands[1].shifted
11520 || inst.operands[1].negative,
11521 BAD_ADDR_MODE);
11522
11523 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11524
11525 inst.instruction |= inst.operands[0].reg << 12;
11526 inst.instruction |= inst.operands[1].reg << 16;
11527 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11528 }
11529
11530 static void
11531 do_t_ldrexd (void)
11532 {
11533 if (!inst.operands[1].present)
11534 {
11535 constraint (inst.operands[0].reg == REG_LR,
11536 _("r14 not allowed as first register "
11537 "when second register is omitted"));
11538 inst.operands[1].reg = inst.operands[0].reg + 1;
11539 }
11540 constraint (inst.operands[0].reg == inst.operands[1].reg,
11541 BAD_OVERLAP);
11542
11543 inst.instruction |= inst.operands[0].reg << 12;
11544 inst.instruction |= inst.operands[1].reg << 8;
11545 inst.instruction |= inst.operands[2].reg << 16;
11546 }
11547
/* Thumb LDR/STR and byte/halfword/signed variants (argument parse).
   Chooses among the 16-bit encodings (immediate offset, register
   offset, SP- and PC-relative) and the 32-bit Thumb-2 encoding,
   depending on operands, size requests and syntax mode.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* ldr pc, [...] is a branch: must be the last insn in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Not a register address: hand over to move_or_literal_pool.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* Switch to the dedicated PC-/SP-relative opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation pick 16 or 32 bits.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-UAL) syntax: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms have dedicated opcodes and only
	 support word-sized immediate-offset loads/stores.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset forms.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11734
11735 static void
11736 do_t_ldstd (void)
11737 {
11738 if (!inst.operands[1].present)
11739 {
11740 inst.operands[1].reg = inst.operands[0].reg + 1;
11741 constraint (inst.operands[0].reg == REG_LR,
11742 _("r14 not allowed here"));
11743 constraint (inst.operands[0].reg == REG_R12,
11744 _("r12 not allowed here"));
11745 }
11746
11747 if (inst.operands[2].writeback
11748 && (inst.operands[0].reg == inst.operands[2].reg
11749 || inst.operands[1].reg == inst.operands[2].reg))
11750 as_warn (_("base register written back, and overlaps "
11751 "one of transfer registers"));
11752
11753 inst.instruction |= inst.operands[0].reg << 12;
11754 inst.instruction |= inst.operands[1].reg << 8;
11755 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11756 }
11757
/* Thumb LDRT/STRT-style unprivileged load/store (argument parse).
   Encodes Rt and delegates the address operand to the Thumb-2
   addressing-mode encoder with is_t set.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11764
11765 static void
11766 do_t_mla (void)
11767 {
11768 unsigned Rd, Rn, Rm, Ra;
11769
11770 Rd = inst.operands[0].reg;
11771 Rn = inst.operands[1].reg;
11772 Rm = inst.operands[2].reg;
11773 Ra = inst.operands[3].reg;
11774
11775 reject_bad_reg (Rd);
11776 reject_bad_reg (Rn);
11777 reject_bad_reg (Rm);
11778 reject_bad_reg (Ra);
11779
11780 inst.instruction |= Rd << 8;
11781 inst.instruction |= Rn << 16;
11782 inst.instruction |= Rm;
11783 inst.instruction |= Ra << 12;
11784 }
11785
11786 static void
11787 do_t_mlal (void)
11788 {
11789 unsigned RdLo, RdHi, Rn, Rm;
11790
11791 RdLo = inst.operands[0].reg;
11792 RdHi = inst.operands[1].reg;
11793 Rn = inst.operands[2].reg;
11794 Rm = inst.operands[3].reg;
11795
11796 reject_bad_reg (RdLo);
11797 reject_bad_reg (RdHi);
11798 reject_bad_reg (Rn);
11799 reject_bad_reg (Rm);
11800
11801 inst.instruction |= RdLo << 12;
11802 inst.instruction |= RdHi << 8;
11803 inst.instruction |= Rn << 16;
11804 inst.instruction |= Rm;
11805 }
11806
/* Thumb MOV/MOVS/CMP (argument parse).  Handles immediate, register
   and shifted-register forms in both unified and divided syntax,
   preferring 16-bit encodings whenever the operands allow.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A write to PC branches: must be the last insn in an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV/MOVS place Rd at bit 8; CMP places Rn at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 ALU_ABS relocs already fix the encoding size.  */
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow register-to-register form.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax below.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12103
12104 static void
12105 do_t_mov16 (void)
12106 {
12107 unsigned Rd;
12108 bfd_vma imm;
12109 bfd_boolean top;
12110
12111 top = (inst.instruction & 0x00800000) != 0;
12112 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12113 {
12114 constraint (top, _(":lower16: not allowed this instruction"));
12115 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12116 }
12117 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12118 {
12119 constraint (!top, _(":upper16: not allowed this instruction"));
12120 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12121 }
12122
12123 Rd = inst.operands[0].reg;
12124 reject_bad_reg (Rd);
12125
12126 inst.instruction |= Rd << 8;
12127 if (inst.reloc.type == BFD_RELOC_UNUSED)
12128 {
12129 imm = inst.reloc.exp.X_add_number;
12130 inst.instruction |= (imm & 0xf000) << 4;
12131 inst.instruction |= (imm & 0x0800) << 15;
12132 inst.instruction |= (imm & 0x0700) << 4;
12133 inst.instruction |= (imm & 0x00ff);
12134 }
12135 }
12136
/* Thumb MVN/MVNS/TST/CMN (argument parse).  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* The comparison forms never accept PC as the first operand.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place Rd at bit 8; the other forms place Rn at 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided (pre-UAL) syntax: 16-bit, low registers, no shift.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12216
/* Thumb MRS (argument parse).  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP system registers (FPSCR etc.) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Register-coded (banked) special register operand.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      /* Scatter the register code into the encoding fields.  */
      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12264
/* Thumb MSR (argument parse).  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP system registers (FPSCR etc.) are handled separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a register-coded banked special register or
     a PSR-plus-field-mask immediate.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flags word into the encoding fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12311
/* Encode a Thumb MUL/MULS instruction, selecting between the 16-bit
   two-register encoding and the 32-bit three-register encoding.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: Rd, Rm -> Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit encoding needs low registers and the destination
	 overlapping one source; whether it may be used also depends on
	 flag-setting vs. the current IT-block context.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12374
12375 static void
12376 do_t_mull (void)
12377 {
12378 unsigned RdLo, RdHi, Rn, Rm;
12379
12380 RdLo = inst.operands[0].reg;
12381 RdHi = inst.operands[1].reg;
12382 Rn = inst.operands[2].reg;
12383 Rm = inst.operands[3].reg;
12384
12385 reject_bad_reg (RdLo);
12386 reject_bad_reg (RdHi);
12387 reject_bad_reg (Rn);
12388 reject_bad_reg (Rm);
12389
12390 inst.instruction |= RdLo << 12;
12391 inst.instruction |= RdHi << 8;
12392 inst.instruction |= Rn << 16;
12393 inst.instruction |= Rm;
12394
12395 if (RdLo == RdHi)
12396 as_tsktsk (_("rdhi and rdlo must be different"));
12397 }
12398
/* Encode a Thumb NOP (or NOP-style hint).  Falls back to the fixed
   16-bit encoding 0x46c0 when Thumb-2 is not available.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide encoding, hint number in the low bits.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12431
/* Encode Thumb NEG/NEGS, choosing between 16- and 32-bit encodings.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Flag-setting choice must agree with the IT-block context for
	 the narrow encoding to be usable.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Traditional syntax: only the 16-bit low-register form exists.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12472
/* Encode Thumb-2 ORN (register or immediate form).  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form defaults the first source to the destination.  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch the opcode word to the immediate
	 variant and leave the value to the T32 immediate fixup.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12506
/* Encode Thumb-2 PKHBT.  The optional shift amount (operand 3) is split
   across two fields of the instruction word.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount: bits 4:2 go to imm3, bits 1:0 to imm2.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12532
/* Encode Thumb-2 PKHTB by rewriting it into the PKHBT encoder.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Without a shift, PKHTB degenerates to PKHBT with the tb bit
	 cleared and the two source registers exchanged.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12549
/* Encode a Thumb-2 PLD-style instruction: a single address operand.  */
static void
do_t_pld (void)
{
  /* Register-offset form: validate the index register.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12558
/* Encode Thumb PUSH/POP.  Prefers the 16-bit encodings: plain low
   registers, or low registers plus LR (push) / PC (pop); otherwise
   falls back to the 32-bit LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop).  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Any other list needs the 32-bit form, built on SP (r13).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12591
12592 static void
12593 do_t_rbit (void)
12594 {
12595 unsigned Rd, Rm;
12596
12597 Rd = inst.operands[0].reg;
12598 Rm = inst.operands[1].reg;
12599
12600 reject_bad_reg (Rd);
12601 reject_bad_reg (Rm);
12602
12603 inst.instruction |= Rd << 8;
12604 inst.instruction |= Rm << 16;
12605 inst.instruction |= Rm;
12606 }
12607
/* Encode Thumb REV-family byte-reverse instructions, choosing between
   the 16-bit and 32-bit encodings.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* The wide encoding carries Rm in both the Rn and Rm fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12636
12637 static void
12638 do_t_rrx (void)
12639 {
12640 unsigned Rd, Rm;
12641
12642 Rd = inst.operands[0].reg;
12643 Rm = inst.operands[1].reg;
12644
12645 reject_bad_reg (Rd);
12646 reject_bad_reg (Rm);
12647
12648 inst.instruction |= Rd << 8;
12649 inst.instruction |= Rm;
12650 }
12651
/* Encode Thumb RSB.  An immediate RSB of #0 may be turned into the
   16-bit NEG encoding when the context allows it.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; the narrow NEG form must
	 agree with the IT-block context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Select the immediate variant of the opcode word; the value
	     itself is handled via the T32 immediate fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12706
/* Encode Thumb SETEND; bit 3 selects big-endian.  Deprecated on ARMv8.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* SETEND is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12718
/* Encode the Thumb shift instructions (ASR, LSL, LSR, ROR), in both the
   register-shift and immediate-shift forms, choosing between the 16-bit
   and 32-bit encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: Rd, shift -> Rd, Rd, shift.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the narrow (16-bit) encoding is usable: the
	 flag-setting choice must agree with the IT context, all
	 registers must be low, and no explicit .w was requested.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: emitted as a 32-bit MOV/MOVS with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Traditional Thumb syntax: only the 16-bit encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12866
12867 static void
12868 do_t_simd (void)
12869 {
12870 unsigned Rd, Rn, Rm;
12871
12872 Rd = inst.operands[0].reg;
12873 Rn = inst.operands[1].reg;
12874 Rm = inst.operands[2].reg;
12875
12876 reject_bad_reg (Rd);
12877 reject_bad_reg (Rn);
12878 reject_bad_reg (Rm);
12879
12880 inst.instruction |= Rd << 8;
12881 inst.instruction |= Rn << 16;
12882 inst.instruction |= Rm;
12883 }
12884
12885 static void
12886 do_t_simd2 (void)
12887 {
12888 unsigned Rd, Rn, Rm;
12889
12890 Rd = inst.operands[0].reg;
12891 Rm = inst.operands[1].reg;
12892 Rn = inst.operands[2].reg;
12893
12894 reject_bad_reg (Rd);
12895 reject_bad_reg (Rn);
12896 reject_bad_reg (Rm);
12897
12898 inst.instruction |= Rd << 8;
12899 inst.instruction |= Rn << 16;
12900 inst.instruction |= Rm;
12901 }
12902
/* Encode Thumb-2 SMC #imm16; the immediate is split across three
   fields of the instruction word.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is fully encoded here; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12918
12919 static void
12920 do_t_hvc (void)
12921 {
12922 unsigned int value = inst.reloc.exp.X_add_number;
12923
12924 inst.reloc.type = BFD_RELOC_UNUSED;
12925 inst.instruction |= (value & 0x0fff);
12926 inst.instruction |= (value & 0xf000) << 4;
12927 }
12928
/* Shared encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturate-position operand (1 for SSAT, 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount: bits 4:2 to imm3, bits 1:0 to imm2.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12966
/* Encode Thumb-2 SSAT: the saturate position is encoded with bias 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12972
12973 static void
12974 do_t_ssat16 (void)
12975 {
12976 unsigned Rd, Rn;
12977
12978 Rd = inst.operands[0].reg;
12979 Rn = inst.operands[2].reg;
12980
12981 reject_bad_reg (Rd);
12982 reject_bad_reg (Rn);
12983
12984 inst.instruction |= Rd << 8;
12985 inst.instruction |= inst.operands[1].imm - 1;
12986 inst.instruction |= Rn << 16;
12987 }
12988
/* Encode Thumb-2 STREX.  The third operand must be a plain
   base-plus-immediate address; the offset is handled by a U8 fixup.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13005
/* Encode Thumb-2 STREXD.  The status register must not overlap any of
   the other three registers.  */
static void
do_t_strexd (void)
{
  /* Second data register defaults to the one after the first.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13022
13023 static void
13024 do_t_sxtah (void)
13025 {
13026 unsigned Rd, Rn, Rm;
13027
13028 Rd = inst.operands[0].reg;
13029 Rn = inst.operands[1].reg;
13030 Rm = inst.operands[2].reg;
13031
13032 reject_bad_reg (Rd);
13033 reject_bad_reg (Rn);
13034 reject_bad_reg (Rm);
13035
13036 inst.instruction |= Rd << 8;
13037 inst.instruction |= Rn << 16;
13038 inst.instruction |= Rm;
13039 inst.instruction |= inst.operands[3].imm << 4;
13040 }
13041
/* Encode Thumb SXTH-family instructions, choosing between the 16-bit
   encoding (low regs, no rotation) and the 32-bit one.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means we started from a 16-bit opcode
     template.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation goes at bit 4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13077
/* Encode Thumb SVC/SWI; the immediate is emitted via the SWI reloc.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      /* Record that the OS extension was used.  */
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13094
/* Encode Thumb-2 TBB/TBH.  Bit 4 of the opcode distinguishes the
   halfword form (TBH), which requires a shifted index.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be last in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13116
/* Encode Thumb UDF; the immediate defaults to 0.  Values above 255
   force the 32-bit encoding, where the immediate is split into a high
   nibble (bit 16) and low 12 bits.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13139
13140
/* Encode Thumb-2 USAT: unlike SSAT, the saturate position is encoded
   as-is (bias 0).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13146
13147 static void
13148 do_t_usat16 (void)
13149 {
13150 unsigned Rd, Rn;
13151
13152 Rd = inst.operands[0].reg;
13153 Rn = inst.operands[2].reg;
13154
13155 reject_bad_reg (Rd);
13156 reject_bad_reg (Rn);
13157
13158 inst.instruction |= Rd << 8;
13159 inst.instruction |= inst.operands[1].imm;
13160 inst.instruction |= Rn << 16;
13161 }
13162
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon encoding table: the three alternative base
   encodings an overloaded mnemonic can map to, selected by operand
   type (see the NEON_ENC_* accessor macros below).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  The
   X-macro is expanded twice below: once to build the N_MNEM_* enum and
   once to build the neon_enc_tab[] array, keeping the two in sync.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13254
/* Expand NEON_ENC_TAB into N_MNEM_<opc> enumerators; the enumerator
   values index neon_enc_tab[] below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, one row per N_MNEM_* value.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one of the three columns of neon_enc_tab for the opcode
   held in the low 28 bits of X; the SINGLE/DOUBLE/FPV8 variants also
   preserve high bits of X in the result.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Rewrite inst.instruction with the encoding variant named by TYPE and
   mark the instruction as Neon (see check_neon_suffixes).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a Neon type suffix on an instruction that never went through
   NEON_ENCODE (i.e. is not actually a Neon instruction).  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13304
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)

/* Helpers that paste the per-element letters into NS_* identifiers.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One NS_* enumerator per shape row, plus NS_NULL as a terminator.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13392
/* Broad classification of each shape (the third column of
   NEON_SHAPE_DEF).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification per shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* One enumerator per operand-letter used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* A decoded shape: number of elements and the kind of each one.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Element-by-element description per shape, indexed by enum neon_shape;
   used to drive neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13457
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The modifier bits below deliberately reuse the low bit values; they are
     only meaningful when N_EQK is also set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All the modifier bits that may accompany N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common combinations of element types, used to build per-instruction
   type constraints.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13518
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Enums are promoted to int through "...", hence the va_arg (ap, int).  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      /* Check each operand position of this candidate shape.  */
      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13661
13662 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13663 means the Q bit should be set). */
13664
13665 static int
13666 neon_quad (enum neon_shape shape)
13667 {
13668 return neon_shape_class[shape] == SC_QUAD;
13669 }
13670
13671 static void
13672 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13673 unsigned *g_size)
13674 {
13675 /* Allow modification to be made to types which are constrained to be
13676 based on the key element, based on bits set alongside N_EQK. */
13677 if ((typebits & N_EQK) != 0)
13678 {
13679 if ((typebits & N_HLF) != 0)
13680 *g_size /= 2;
13681 else if ((typebits & N_DBL) != 0)
13682 *g_size *= 2;
13683 if ((typebits & N_SGN) != 0)
13684 *g_type = NT_signed;
13685 else if ((typebits & N_UNS) != 0)
13686 *g_type = NT_unsigned;
13687 else if ((typebits & N_INT) != 0)
13688 *g_type = NT_integer;
13689 else if ((typebits & N_FLT) != 0)
13690 *g_type = NT_float;
13691 else if ((typebits & N_SIZ) != 0)
13692 *g_type = NT_untyped;
13693 }
13694 }
13695
13696 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13697 operand type, i.e. the single type specified in a Neon instruction when it
13698 is the only one given. */
13699
13700 static struct neon_type_el
13701 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13702 {
13703 struct neon_type_el dest = *key;
13704
13705 gas_assert ((thisarg & N_EQK) != 0);
13706
13707 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13708
13709 return dest;
13710 }
13711
13712 /* Convert Neon type and size into compact bitmask representation. */
13713
13714 static enum neon_type_mask
13715 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13716 {
13717 switch (type)
13718 {
13719 case NT_untyped:
13720 switch (size)
13721 {
13722 case 8: return N_8;
13723 case 16: return N_16;
13724 case 32: return N_32;
13725 case 64: return N_64;
13726 default: ;
13727 }
13728 break;
13729
13730 case NT_integer:
13731 switch (size)
13732 {
13733 case 8: return N_I8;
13734 case 16: return N_I16;
13735 case 32: return N_I32;
13736 case 64: return N_I64;
13737 default: ;
13738 }
13739 break;
13740
13741 case NT_float:
13742 switch (size)
13743 {
13744 case 16: return N_F16;
13745 case 32: return N_F32;
13746 case 64: return N_F64;
13747 default: ;
13748 }
13749 break;
13750
13751 case NT_poly:
13752 switch (size)
13753 {
13754 case 8: return N_P8;
13755 case 16: return N_P16;
13756 case 64: return N_P64;
13757 default: ;
13758 }
13759 break;
13760
13761 case NT_signed:
13762 switch (size)
13763 {
13764 case 8: return N_S8;
13765 case 16: return N_S16;
13766 case 32: return N_S32;
13767 case 64: return N_S64;
13768 default: ;
13769 }
13770 break;
13771
13772 case NT_unsigned:
13773 switch (size)
13774 {
13775 case 8: return N_U8;
13776 case 16: return N_U16;
13777 case 32: return N_U32;
13778 case 64: return N_U64;
13779 default: ;
13780 }
13781 break;
13782
13783 default: ;
13784 }
13785
13786 return N_UTYP;
13787 }
13788
13789 /* Convert compact Neon bitmask type representation to a type and size. Only
13790 handles the case where a single bit is set in the mask. */
13791
13792 static int
13793 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13794 enum neon_type_mask mask)
13795 {
13796 if ((mask & N_EQK) != 0)
13797 return FAIL;
13798
13799 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13800 *size = 8;
13801 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13802 *size = 16;
13803 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13804 *size = 32;
13805 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13806 *size = 64;
13807 else
13808 return FAIL;
13809
13810 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13811 *type = NT_signed;
13812 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13813 *type = NT_unsigned;
13814 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13815 *type = NT_integer;
13816 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13817 *type = NT_untyped;
13818 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13819 *type = NT_poly;
13820 else if ((mask & (N_F_ALL)) != 0)
13821 *type = NT_float;
13822 else
13823 return FAIL;
13824
13825 return SUCCESS;
13826 }
13827
13828 /* Modify a bitmask of allowed types. This is only needed for type
13829 relaxation. */
13830
13831 static unsigned
13832 modify_types_allowed (unsigned allowed, unsigned mods)
13833 {
13834 unsigned size;
13835 enum neon_el_type type;
13836 unsigned destmask;
13837 int i;
13838
13839 destmask = 0;
13840
13841 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13842 {
13843 if (el_type_of_type_chk (&type, &size,
13844 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13845 {
13846 neon_modify_type_size (mods, &type, &size);
13847 destmask |= type_chk_of_el_type (type, size);
13848 }
13849 }
13850
13851 return destmask;
13852 }
13853
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the key (reference) type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may come from the mnemonic suffix or from the operands, but
     never from both at once.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's type/size (and checks the
     FP16 architecture constraint); pass 1 validates every operand against
     its constraint, possibly relative to the recorded key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type after applying
		     any modifier bits to the key.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14063
14064 /* Neon-style VFP instruction forwarding. */
14065
14066 /* Thumb VFP instructions have 0xE in the condition field. */
14067
14068 static void
14069 do_vfp_cond_or_thumb (void)
14070 {
14071 inst.is_neon = 1;
14072
14073 if (thumb_mode)
14074 inst.instruction |= 0xe0000000;
14075 else
14076 inst.instruction |= inst.cond << 28;
14077 }
14078
14079 /* Look up and encode a simple mnemonic, for use as a helper function for the
14080 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14081 etc. It is assumed that operand parsing has already been done, and that the
14082 operands are in the form expected by the given opcode (this isn't necessarily
14083 the same as the form in which they were parsed, hence some massaging must
14084 take place before this function is called).
14085 Checks current arch version against that in the looked-up opcode. */
14086
14087 static void
14088 do_vfp_nsyn_opcode (const char *opname)
14089 {
14090 const struct asm_opcode *opcode;
14091
14092 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14093
14094 if (!opcode)
14095 abort ();
14096
14097 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14098 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14099 _(BAD_FPU));
14100
14101 inst.is_neon = 1;
14102
14103 if (thumb_mode)
14104 {
14105 inst.instruction = opcode->tvalue;
14106 opcode->tencode ();
14107 }
14108 else
14109 {
14110 inst.instruction = (inst.cond << 28) | opcode->avalue;
14111 opcode->aencode ();
14112 }
14113 }
14114
14115 static void
14116 do_vfp_nsyn_add_sub (enum neon_shape rs)
14117 {
14118 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14119
14120 if (rs == NS_FFF || rs == NS_HHH)
14121 {
14122 if (is_add)
14123 do_vfp_nsyn_opcode ("fadds");
14124 else
14125 do_vfp_nsyn_opcode ("fsubs");
14126
14127 /* ARMv8.2 fp16 instruction. */
14128 if (rs == NS_HHH)
14129 do_scalar_fp16_v82_encode ();
14130 }
14131 else
14132 {
14133 if (is_add)
14134 do_vfp_nsyn_opcode ("faddd");
14135 else
14136 do_vfp_nsyn_opcode ("fsubd");
14137 }
14138 }
14139
14140 /* Check operand types to see if this is a VFP instruction, and if so call
14141 PFN (). */
14142
14143 static int
14144 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14145 {
14146 enum neon_shape rs;
14147 struct neon_type_el et;
14148
14149 switch (args)
14150 {
14151 case 2:
14152 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14153 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14154 break;
14155
14156 case 3:
14157 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14158 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14159 N_F_ALL | N_KEY | N_VFP);
14160 break;
14161
14162 default:
14163 abort ();
14164 }
14165
14166 if (et.type != NT_invtype)
14167 {
14168 pfn (rs);
14169 return SUCCESS;
14170 }
14171
14172 inst.error = NULL;
14173 return FAIL;
14174 }
14175
14176 static void
14177 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14178 {
14179 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14180
14181 if (rs == NS_FFF || rs == NS_HHH)
14182 {
14183 if (is_mla)
14184 do_vfp_nsyn_opcode ("fmacs");
14185 else
14186 do_vfp_nsyn_opcode ("fnmacs");
14187
14188 /* ARMv8.2 fp16 instruction. */
14189 if (rs == NS_HHH)
14190 do_scalar_fp16_v82_encode ();
14191 }
14192 else
14193 {
14194 if (is_mla)
14195 do_vfp_nsyn_opcode ("fmacd");
14196 else
14197 do_vfp_nsyn_opcode ("fnmacd");
14198 }
14199 }
14200
14201 static void
14202 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14203 {
14204 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14205
14206 if (rs == NS_FFF || rs == NS_HHH)
14207 {
14208 if (is_fma)
14209 do_vfp_nsyn_opcode ("ffmas");
14210 else
14211 do_vfp_nsyn_opcode ("ffnmas");
14212
14213 /* ARMv8.2 fp16 instruction. */
14214 if (rs == NS_HHH)
14215 do_scalar_fp16_v82_encode ();
14216 }
14217 else
14218 {
14219 if (is_fma)
14220 do_vfp_nsyn_opcode ("ffmad");
14221 else
14222 do_vfp_nsyn_opcode ("ffnmad");
14223 }
14224 }
14225
14226 static void
14227 do_vfp_nsyn_mul (enum neon_shape rs)
14228 {
14229 if (rs == NS_FFF || rs == NS_HHH)
14230 {
14231 do_vfp_nsyn_opcode ("fmuls");
14232
14233 /* ARMv8.2 fp16 instruction. */
14234 if (rs == NS_HHH)
14235 do_scalar_fp16_v82_encode ();
14236 }
14237 else
14238 do_vfp_nsyn_opcode ("fmuld");
14239 }
14240
14241 static void
14242 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14243 {
14244 int is_neg = (inst.instruction & 0x80) != 0;
14245 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14246
14247 if (rs == NS_FF || rs == NS_HH)
14248 {
14249 if (is_neg)
14250 do_vfp_nsyn_opcode ("fnegs");
14251 else
14252 do_vfp_nsyn_opcode ("fabss");
14253
14254 /* ARMv8.2 fp16 instruction. */
14255 if (rs == NS_HH)
14256 do_scalar_fp16_v82_encode ();
14257 }
14258 else
14259 {
14260 if (is_neg)
14261 do_vfp_nsyn_opcode ("fnegd");
14262 else
14263 do_vfp_nsyn_opcode ("fabsd");
14264 }
14265 }
14266
14267 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14268 insns belong to Neon, and are handled elsewhere. */
14269
14270 static void
14271 do_vfp_nsyn_ldm_stm (int is_dbmode)
14272 {
14273 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14274 if (is_ldm)
14275 {
14276 if (is_dbmode)
14277 do_vfp_nsyn_opcode ("fldmdbs");
14278 else
14279 do_vfp_nsyn_opcode ("fldmias");
14280 }
14281 else
14282 {
14283 if (is_dbmode)
14284 do_vfp_nsyn_opcode ("fstmdbs");
14285 else
14286 do_vfp_nsyn_opcode ("fstmias");
14287 }
14288 }
14289
14290 static void
14291 do_vfp_nsyn_sqrt (void)
14292 {
14293 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14294 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14295
14296 if (rs == NS_FF || rs == NS_HH)
14297 {
14298 do_vfp_nsyn_opcode ("fsqrts");
14299
14300 /* ARMv8.2 fp16 instruction. */
14301 if (rs == NS_HH)
14302 do_scalar_fp16_v82_encode ();
14303 }
14304 else
14305 do_vfp_nsyn_opcode ("fsqrtd");
14306 }
14307
14308 static void
14309 do_vfp_nsyn_div (void)
14310 {
14311 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14312 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14313 N_F_ALL | N_KEY | N_VFP);
14314
14315 if (rs == NS_FFF || rs == NS_HHH)
14316 {
14317 do_vfp_nsyn_opcode ("fdivs");
14318
14319 /* ARMv8.2 fp16 instruction. */
14320 if (rs == NS_HHH)
14321 do_scalar_fp16_v82_encode ();
14322 }
14323 else
14324 do_vfp_nsyn_opcode ("fdivd");
14325 }
14326
14327 static void
14328 do_vfp_nsyn_nmul (void)
14329 {
14330 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14331 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14332 N_F_ALL | N_KEY | N_VFP);
14333
14334 if (rs == NS_FFF || rs == NS_HHH)
14335 {
14336 NEON_ENCODE (SINGLE, inst);
14337 do_vfp_sp_dyadic ();
14338
14339 /* ARMv8.2 fp16 instruction. */
14340 if (rs == NS_HHH)
14341 do_scalar_fp16_v82_encode ();
14342 }
14343 else
14344 {
14345 NEON_ENCODE (DOUBLE, inst);
14346 do_vfp_dp_rd_rn_rm ();
14347 }
14348 do_vfp_cond_or_thumb ();
14349
14350 }
14351
/* Encode a Neon-syntax VCMP/VCMPE, either register-register or against an
   immediate (the compare-with-zero forms).  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Immediate operand: switch to the corresponding compare-with-zero
	 pseudo-mnemonic (vcmpz / vcmpez) before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14406
14407 static void
14408 nsyn_insert_sp (void)
14409 {
14410 inst.operands[1] = inst.operands[0];
14411 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14412 inst.operands[0].reg = REG_SP;
14413 inst.operands[0].isreg = 1;
14414 inst.operands[0].writeback = 1;
14415 inst.operands[0].present = 1;
14416 }
14417
14418 static void
14419 do_vfp_nsyn_push (void)
14420 {
14421 nsyn_insert_sp ();
14422
14423 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14424 _("register list must contain at least 1 and at most 16 "
14425 "registers"));
14426
14427 if (inst.operands[1].issingle)
14428 do_vfp_nsyn_opcode ("fstmdbs");
14429 else
14430 do_vfp_nsyn_opcode ("fstmdbd");
14431 }
14432
14433 static void
14434 do_vfp_nsyn_pop (void)
14435 {
14436 nsyn_insert_sp ();
14437
14438 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14439 _("register list must contain at least 1 and at most 16 "
14440 "registers"));
14441
14442 if (inst.operands[1].issingle)
14443 do_vfp_nsyn_opcode ("fldmias");
14444 else
14445 do_vfp_nsyn_opcode ("fldmiad");
14446 }
14447
14448 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14449 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14450
14451 static void
14452 neon_dp_fixup (struct arm_it* insn)
14453 {
14454 unsigned int i = insn->instruction;
14455 insn->is_neon = 1;
14456
14457 if (thumb_mode)
14458 {
14459 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14460 if (i & (1 << 24))
14461 i |= 1 << 28;
14462
14463 i &= ~(1 << 24);
14464
14465 i |= 0xef000000;
14466 }
14467 else
14468 i |= 0xf2000000;
14469
14470 insn->instruction = i;
14471 }
14472
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned first_set = ffs (x);

  return first_set - 4;
}
14481
/* Extract the bottom four bits of a register number.  */
#define LOW4(R) ((R) & 0xf)
/* Extract bit four of a register number; encoded as a separate single bit
   in Neon/VFP instructions.  */
#define HI1(R) (((R) >> 4) & 1)
14484
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Destination register: low four bits in 12-15, top bit (D) at 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* First source register: low four bits in 16-19, top bit (N) at 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Second source register: low four bits in 0-3, top bit (M) at 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;	/* Q bit.  */
  inst.instruction |= (ubit != 0) << 24;	/* U bit (moved in Thumb).  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14509
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Destination register: low four bits in 12-15, top bit (D) at 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Source register: low four bits in 0-3, top bit (M) at 5.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;		/* Q bit.  */
  inst.instruction |= (ubit != 0) << 24;	/* U bit (moved in Thumb).  */

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14532
14533 /* Neon instruction encoders, in approximate order of appearance. */
14534
14535 static void
14536 do_neon_dyadic_i_su (void)
14537 {
14538 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14539 struct neon_type_el et = neon_check_type (3, rs,
14540 N_EQK, N_EQK, N_SU_32 | N_KEY);
14541 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14542 }
14543
14544 static void
14545 do_neon_dyadic_i64_su (void)
14546 {
14547 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14548 struct neon_type_el et = neon_check_type (3, rs,
14549 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14550 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14551 }
14552
/* Encode a two-register Neon shift with an immediate shift amount.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes; its low three bits go to insn bits 19-21 and its
     bit 3 (i.e. a 64-bit element) to insn bit 7.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  /* NOTE(review): IMMBITS starting at bit 16 overlaps the size bits below
     for wider elements — presumably forming the combined immediate/size
     field; confirm against the ARM ARM shift-immediate encodings.  */
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14571
/* Handle VSHL: either the immediate form (Vd, Vm, #imm) or the
   three-register form (Vd, Vm, Vn).  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate shift amount.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left-shift amount must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14607
/* VQSHL: like do_neon_shl_imm, but the immediate form writes the U bit
   from the operand's signedness.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14637
14638 static void
14639 do_neon_rshl (void)
14640 {
14641 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14642 struct neon_type_el et = neon_check_type (3, rs,
14643 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14644 unsigned int tmp;
14645
14646 tmp = inst.operands[2].reg;
14647 inst.operands[2].reg = inst.operands[1].reg;
14648 inst.operands[1].reg = tmp;
14649 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14650 }
14651
/* Compute the "cmode" field for a Neon bitwise-logic immediate
   (VBIC/VORR and their pseudo-ops) of element size SIZE bits.
   On success, store the 8-bit immediate field in *IMMBITS and return the
   cmode value; on failure report an error and return FAIL.  The cmode
   selects which byte of each element the immediate occupies.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics?  There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit elements: the immediate may sit in any one of the four
	 byte positions.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise try the 16-bit encodings, which require the two
	 halfwords to be identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: the immediate may occupy either byte.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14707
/* Encode Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN/...).
   The three-register forms go through the common three-same encoder;
   the immediate forms compute a cmode/immbits pair, treating VAND and
   VORN as pseudo-instructions for VBIC and VORR with the immediate
   inverted.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, with either two or three operands (the two-operand
	 syntax omits the repeated destination register).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14793
14794 static void
14795 do_neon_bitfield (void)
14796 {
14797 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14798 neon_check_type (3, rs, N_IGNORE_TYPE);
14799 neon_three_same (neon_quad (rs), 0, -1);
14800 }
14801
/* Common handler for dyadic operations that accept both integer and
   float element types.  TYPES is the mask of acceptable types for the
   key operand, DESTBITS extra qualifiers for the destination, and
   UBIT_MEANING the element type that should cause the U bit to be set
   for the integer encoding.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only the half-precision variant needs an explicit size field.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14820
14821 static void
14822 do_neon_dyadic_if_su (void)
14823 {
14824 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14825 }
14826
14827 static void
14828 do_neon_dyadic_if_su_d (void)
14829 {
14830 /* This version only allow D registers, but that constraint is enforced during
14831 operand parsing so we don't need to do anything extra here. */
14832 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14833 }
14834
14835 static void
14836 do_neon_dyadic_if_i_d (void)
14837 {
14838 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14839 affected if we specify unsigned args. */
14840 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14841 }
14842
/* Flags for vfp_or_neon_is_neon, selecting which checks/fix-ups run.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject (ARM-mode) condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require base Advanced SIMD.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Advanced SIMD.  */
};
14849
14850 /* Call this function if an instruction which may have belonged to the VFP or
14851 Neon instruction sets, but turned out to be a Neon instruction (due to the
14852 operand types involved, etc.). We have to check and/or fix-up a couple of
14853 things:
14854
14855 - Make sure the user hasn't attempted to make a Neon instruction
14856 conditional.
14857 - Alter the value in the condition code field if necessary.
14858 - Make sure that the arch supports Neon instructions.
14859
14860 Which of these operations take place depends on bits from enum
14861 vfp_or_neon_is_neon_bits.
14862
14863 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14864 current instruction's condition is COND_ALWAYS, the condition field is
14865 changed to inst.uncond_value. This is necessary because instructions shared
14866 between VFP and Neon may be conditional for the VFP variants only, and the
14867 unconditional Neon version must have, e.g., 0xF in the condition field. */
14868
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Side effect: force the unconditional encoding (see the warning in
	 the comment above this function).  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Base Advanced SIMD must be available.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  /* ARMv8 Advanced SIMD must be available.  */
  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14900
/* VADD/VSUB: try the VFP form first; otherwise encode as an integer or
   float Neon three-same operation.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14914
14915 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14916 result to be:
14917 V<op> A,B (A is operand 0, B is operand 2)
14918 to mean:
14919 V<op> A,B,A
14920 not:
14921 V<op> A,B,B
14922 so handle that case specially. */
14923
14924 static void
14925 neon_exchange_operands (void)
14926 {
14927 if (inst.operands[1].present)
14928 {
14929 void *scratch = xmalloc (sizeof (inst.operands[0]));
14930
14931 /* Swap operands[1] and operands[2]. */
14932 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14933 inst.operands[1] = inst.operands[2];
14934 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14935 free (scratch);
14936 }
14937 else
14938 {
14939 inst.operands[1] = inst.operands[2];
14940 inst.operands[2] = inst.operands[0];
14941 }
14942 }
14943
/* Encode a Neon comparison.  REGTYPES are the element types allowed for
   the register-register form, IMMTYPES those for the compare-with-zero
   immediate form.  INVERT requests the operand exchange used to encode
   e.g. VCLT as VCGT with swapped operands.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against zero (two-register immediate form).  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the float comparison encodings.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14971
14972 static void
14973 do_neon_cmp (void)
14974 {
14975 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
14976 }
14977
14978 static void
14979 do_neon_cmp_inv (void)
14980 {
14981 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
14982 }
14983
14984 static void
14985 do_neon_ceq (void)
14986 {
14987 neon_compare (N_IF_32, N_IF_32, FALSE);
14988 }
14989
14990 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14991 scalars, which are encoded in 5 bits, M : Rm.
14992 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14993 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14994 index in M. */
14995
/* Encode SCALAR (a packed register+index value) as the 5-bit M:Rm field
   for a multiply instruction with element size ELSIZE (see the comment
   above for the layout).  Reports an error and returns 0 if the scalar
   is out of range for the given element size.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* Register in bits [2:0], index in bits [4:3].  */
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* Register in bits [3:0], index in bit 4.  */
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  /* Error case: the caller still gets a well-defined (if meaningless)
     encoding.  */
  return 0;
}
15021
15022 /* Encode multiply / multiply-accumulate scalar instructions. */
15023
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Operand 2 is a scalar; encode it in the M:Rm field.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 selects the float form; bits 20-21 hold log2 of the size.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15046
/* VMLA/VMLS: try the VFP form first, then either the by-scalar Neon
   encoding or the common three-same encoding.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15071
15072 static void
15073 do_neon_fmac (void)
15074 {
15075 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15076 return;
15077
15078 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15079 return;
15080
15081 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15082 }
15083
15084 static void
15085 do_neon_tst (void)
15086 {
15087 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15088 struct neon_type_el et = neon_check_type (3, rs,
15089 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15090 neon_three_same (neon_quad (rs), 0, et.size);
15091 }
15092
15093 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15094 same types as the MAC equivalents. The polynomial type for this instruction
15095 is encoded the same as the integer type. */
15096
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* By-scalar form shares the MAC encoder (see comment above).  */
    do_neon_mac_maybe_scalar ();
  else
    /* Three-register form additionally accepts the P8 polynomial type,
       which encodes like the integer types.  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15111
/* VQDMULH/VQRDMULH: signed 16/32-bit saturating doubling multiply,
   either by-scalar or three-register.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15133
/* VQRDMLAH/VQRDMLSH (ARMv8.1 Advanced SIMD), by-scalar or
   three-register.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      /* Accept but warn, and record the implied ARMv8.1 feature so the
	 object's build attributes reflect it.  */
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15165
15166 static void
15167 do_neon_fcmp_absolute (void)
15168 {
15169 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15170 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15171 N_F_16_32 | N_KEY);
15172 /* Size field comes from bit mask. */
15173 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15174 }
15175
/* Inverted absolute comparison: exchange the source operands, then use
   the plain absolute-comparison encoder.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15182
15183 static void
15184 do_neon_step (void)
15185 {
15186 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15187 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15188 N_F_16_32 | N_KEY);
15189 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15190 }
15191
/* VABS/VNEG: try the VFP form first, otherwise encode the two-register
   Neon form for signed 32-bit-or-narrower integer or float elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the float form.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15217
/* VSLI: shift left and insert.  Shift amount must be less than the
   element size; the immediate field encodes it directly.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15229
/* VSRI: shift right and insert.  Right shifts of 1..size are allowed and
   are encoded as (size - shift) in the immediate field.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
15241
/* VQSHLU: signed-to-unsigned saturating shift left by immediate.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15258
/* VQMOVN: saturating narrowing move, Q source to D destination.  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* op field: 0xc0 for the unsigned variant, 0x80 for the signed one.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
15273
15274 static void
15275 do_neon_qmovun (void)
15276 {
15277 struct neon_type_el et = neon_check_type (2, NS_DQ,
15278 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15279 /* Saturating move with unsigned results. Operands must be signed. */
15280 NEON_ENCODE (INTEGER, inst);
15281 neon_two_same (0, 1, et.size / 2);
15282 }
15283
/* VQSHRN/VQRSHRN: saturating shift right and narrow.  A zero shift is a
   synonym for VQMOVN and is re-dispatched accordingly.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts encode as (size - shift).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15310
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow to an unsigned
   result.  A zero shift is a synonym for VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15340
15341 static void
15342 do_neon_movn (void)
15343 {
15344 struct neon_type_el et = neon_check_type (2, NS_DQ,
15345 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15346 NEON_ENCODE (INTEGER, inst);
15347 neon_two_same (0, 1, et.size / 2);
15348 }
15349
/* VSHRN/VRSHRN: shift right and narrow.  A zero shift is a pseudo-op for
   VMOVN and is re-dispatched accordingly.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts encode as (size - shift).  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15374
/* VSHLL: shift left long (D source, Q destination).  The maximum-shift
   case (shift == element size) has its own encoding; other shifts use the
   generic immediate-shift encoder.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15404
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.

   CVT_FLAVOUR_VAR is an X-macro table of every supported conversion
   flavour.  Each row is:
     CVT_VAR (name, type of operand 0, type of operand 1, register class,
	      bitshift-immediate mnemonic, plain mnemonic,
	      round-to-zero mnemonic)
   where a NULL mnemonic means that form of the conversion does not exist.
   The table is expanded several times below with different CVT_VAR
   definitions to build the flavour enum, the mnemonic arrays and the
   flavour-detection code.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15443
/* Expand the flavour table into enumerator names neon_cvt_flavour_<C>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour in the table that is a pure-VFP conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15456
/* Determine which conversion flavour the current instruction is, by
   trying neon_check_type against each row of the flavour table in turn
   and returning the first that matches.  Returns
   neon_cvt_flavour_invalid when none does.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15482
/* Rounding-mode suffixes of the VCVT/VRINT family (the trailing letter of
   e.g. VCVTA, VRINTZ).  NOTE(review): the per-mode meanings below follow
   the ARM ARM definitions of those mnemonic suffixes — confirm against the
   architecture manual.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Round to nearest, ties away from zero.  */
  neon_cvt_mode_n,	/* Round to nearest, ties to even.  */
  neon_cvt_mode_p,	/* Round towards plus infinity.  */
  neon_cvt_mode_m,	/* Round towards minus infinity.  */
  neon_cvt_mode_z,	/* Round towards zero.  */
  neon_cvt_mode_x,	/* Round using FPSCR mode, signal inexact.  */
  neon_cvt_mode_r	/* Round using FPSCR rounding mode.  */
};
15493
15494 /* Neon-syntax VFP conversions. */
15495
/* Encode a Neon-syntax VCVT as the underlying VFP instruction: pick the
   bitshift-immediate or plain mnemonic for FLAVOUR from the flavour table
   and hand it to do_vfp_nsyn_opcode.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms take a single register repeated: fold
	     operand 2 down into operand 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  /* A NULL table entry means this flavour has no such form.  */
  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15547
/* VCVTZ (round towards zero): look up the round-to-zero mnemonic for the
   detected flavour; flavours with no such form (NULL entry) are silently
   skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15564
15565 static void
15566 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15567 enum neon_cvt_mode mode)
15568 {
15569 int sz, op;
15570 int rm;
15571
15572 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15573 D register operands. */
15574 if (flavour == neon_cvt_flavour_s32_f64
15575 || flavour == neon_cvt_flavour_u32_f64)
15576 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15577 _(BAD_FPU));
15578
15579 if (flavour == neon_cvt_flavour_s32_f16
15580 || flavour == neon_cvt_flavour_u32_f16)
15581 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15582 _(BAD_FP16));
15583
15584 set_it_insn_type (OUTSIDE_IT_INSN);
15585
15586 switch (flavour)
15587 {
15588 case neon_cvt_flavour_s32_f64:
15589 sz = 1;
15590 op = 1;
15591 break;
15592 case neon_cvt_flavour_s32_f32:
15593 sz = 0;
15594 op = 1;
15595 break;
15596 case neon_cvt_flavour_s32_f16:
15597 sz = 0;
15598 op = 1;
15599 break;
15600 case neon_cvt_flavour_u32_f64:
15601 sz = 1;
15602 op = 0;
15603 break;
15604 case neon_cvt_flavour_u32_f32:
15605 sz = 0;
15606 op = 0;
15607 break;
15608 case neon_cvt_flavour_u32_f16:
15609 sz = 0;
15610 op = 0;
15611 break;
15612 default:
15613 first_error (_("invalid instruction shape"));
15614 return;
15615 }
15616
15617 switch (mode)
15618 {
15619 case neon_cvt_mode_a: rm = 0; break;
15620 case neon_cvt_mode_n: rm = 1; break;
15621 case neon_cvt_mode_p: rm = 2; break;
15622 case neon_cvt_mode_m: rm = 3; break;
15623 default: first_error (_("invalid rounding mode")); return;
15624 }
15625
15626 NEON_ENCODE (FPV8, inst);
15627 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15628 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15629 inst.instruction |= sz << 8;
15630
15631 /* ARMv8.2 fp16 VCVT instruction. */
15632 if (flavour == neon_cvt_flavour_s32_f16
15633 ||flavour == neon_cvt_flavour_u32_f16)
15634 do_scalar_fp16_v82_encode ();
15635 inst.instruction |= op << 7;
15636 inst.instruction |= rm << 16;
15637 inst.instruction |= 0xf0000000;
15638 inst.is_neon = TRUE;
15639 }
15640
/* Common worker for the whole VCVT family.  MODE is the rounding mode
   implied by the mnemonic.  Dispatches on operand shape: fixed-point
   Neon forms (NS_DDI/NS_QQI), integer/float Neon forms (NS_DD/NS_QQ),
   half-precision widening/narrowing (NS_QD/NS_DQ), with everything
   else falling back to the VFP-syntax encoders.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Fixed-point Neon conversion; enctab is indexed by flavour.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements; NOTE(review): this re-sets bit 21 set
	       just above — harmless but redundant.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit (fp16) elements.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* VCVT{A,N,P,M}: unconditional ARMv8 Neon forms.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	int_encode:
	  {
	    /* Integer <-> float Neon conversion; indexed by flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15832
/* VCVTR: convert using the mode_x ("use current FPSCR rounding")
   variant.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15838
/* Plain VCVT: encoded with the round-towards-zero mode.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15844
/* VCVTA: convert with the 'a' rounding mode.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15850
/* VCVTN: convert with the 'n' rounding mode.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15856
/* VCVTP: convert with the 'p' rounding mode.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15862
/* VCVTM: convert with the 'm' rounding mode.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15868
15869 static void
15870 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15871 {
15872 if (is_double)
15873 mark_feature_used (&fpu_vfp_ext_armv8);
15874
15875 encode_arm_vfp_reg (inst.operands[0].reg,
15876 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15877 encode_arm_vfp_reg (inst.operands[1].reg,
15878 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15879 inst.instruction |= to ? 0x10000 : 0;
15880 inst.instruction |= t ? 0x80 : 0;
15881 inst.instruction |= is_double ? 0x100 : 0;
15882 do_vfp_cond_or_thumb ();
15883 }
15884
/* Common worker for VCVTB (t == FALSE) and VCVTT (t == TRUE).  The
   direction and width variant are discovered by probing the operand
   types with neon_check_type; each failed probe leaves an error in
   inst.error which is cleared before trying the next combination.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15926
/* VCVTB: convert the bottom half of the half-precision operand.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15932
15933
/* VCVTT: convert the top half of the half-precision operand.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15939
/* Encode the immediate forms of VMOV/VMVN.  The immediate must be
   representable in one of the Neon "cmode" encodings; if the value
   cannot be encoded directly, the bits are inverted and the opposite
   mnemonic (VMOV <-> VMVN) is tried before giving up.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    /* A 64-bit immediate arrives split across imm (low) and reg (high).  */
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the OP bit: it may have been flipped above.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15991
15992 static void
15993 do_neon_mvn (void)
15994 {
15995 if (inst.operands[1].isreg)
15996 {
15997 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15998
15999 NEON_ENCODE (INTEGER, inst);
16000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16002 inst.instruction |= LOW4 (inst.operands[1].reg);
16003 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16004 inst.instruction |= neon_quad (rs) << 6;
16005 }
16006 else
16007 {
16008 NEON_ENCODE (IMMED, inst);
16009 neon_move_immediate ();
16010 }
16011
16012 neon_dp_fixup (&inst);
16013 }
16014
16015 /* Encode instructions of form:
16016
16017 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16018 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16019
16020 static void
16021 neon_mixed_length (struct neon_type_el et, unsigned size)
16022 {
16023 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16024 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16025 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16026 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16027 inst.instruction |= LOW4 (inst.operands[2].reg);
16028 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16029 inst.instruction |= (et.type == NT_unsigned) << 24;
16030 inst.instruction |= neon_logbits (size) << 20;
16031
16032 neon_dp_fixup (&inst);
16033 }
16034
/* Lengthening three-operand ops (Qd = Dn <op> Dm) on s/u 8/16/32
   element types.  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16043
/* VABAL: absolute-difference-and-accumulate, long form (Qd, Dn, Dm).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16051
/* Common worker for long multiply/accumulate ops that take either a
   scalar (Qd, Dn, Dm[x]) or a register (Qd, Dn, Dm) third operand.
   NOTE(review): despite the parameter names, REGTYPES constrains the
   scalar (NS_QDS) form and SCALARTYPES the register (NS_QDD) form —
   verify the naming against the callers before relying on it.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16070
/* VMLAL/VMLSL and friends: scalar form allows s/u 16/32, register
   form any s/u 8/16/32.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16076
/* Widening three-operand ops (Qd = Qn <op> Dm), e.g. VADDW/VSUBW.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16084
/* Narrowing three-operand ops (Dd = Qn <op> Qm), e.g. VADDHN.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16095
/* VQDMULL/VQDMLAL/VQDMLSL: signed 16/32-bit elements only, in both
   scalar and register forms.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16101
/* VMULL: long multiply, including the polynomial (P8/P64) variants.
   The scalar form is shared with the multiply-accumulate encoder.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Pretend the size is 32 so neon_logbits yields the 0b10
	     size encoding required for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16133
16134 static void
16135 do_neon_ext (void)
16136 {
16137 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16138 struct neon_type_el et = neon_check_type (3, rs,
16139 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16140 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16141
16142 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16143 _("shift out of range"));
16144 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16145 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16146 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16147 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16148 inst.instruction |= LOW4 (inst.operands[2].reg);
16149 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16150 inst.instruction |= neon_quad (rs) << 6;
16151 inst.instruction |= imm << 8;
16152
16153 neon_dp_fixup (&inst);
16154 }
16155
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16172
/* VDUP: replicate either a vector scalar (Dm[x]) or an ARM core
   register into every lane of a D or Q register.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index and size share the imm4 field: index goes in the
	 high bits, a single set bit below it encodes the size.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16223
16224 /* VMOV has particularly many variations. It can be one of:
16225 0. VMOV<c><q> <Qd>, <Qm>
16226 1. VMOV<c><q> <Dd>, <Dm>
16227 (Register operations, which are VORR with Rm = Rn.)
16228 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16229 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16230 (Immediate loads.)
16231 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16232 (ARM register to scalar.)
16233 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16234 (Two ARM registers to vector.)
16235 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16236 (Scalar to ARM register.)
16237 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16238 (Vector to two ARM registers.)
16239 8. VMOV.F32 <Sd>, <Sm>
16240 9. VMOV.F64 <Dd>, <Dm>
16241 (VFP register moves.)
16242 10. VMOV.F32 <Sd>, #imm
16243 11. VMOV.F64 <Dd>, #imm
16244 (VFP float immediate load.)
16245 12. VMOV <Rd>, <Sm>
16246 (VFP single to ARM reg.)
16247 13. VMOV <Sd>, <Rm>
16248 (ARM reg to VFP single.)
16249 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16250 (Two ARM regs to two VFP singles.)
16251 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16252 (Two VFP singles to two ARM regs.)
16253
16254 These cases can be disambiguated using neon_select_shape, except cases 1/9
16255 and 3/11 which depend on the operand type too.
16256
16257 All the encoded bits are hardcoded by this function.
16258
16259 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16260 Cases 5, 7 may be used with VFPv2 and above.
16261
16262 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16263 can specify a type where it doesn't make sense to, and is ignored). */
16264
/* Encode all VMOV variants; see the case-numbering in the comment
   block above.  Disambiguation is by neon_select_shape, with the
   remaining 1/9 and 3/11 ambiguities resolved by the operand type.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ,  NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size selector for the opc1/opc2 fields; the lane index is
	   merged in below it.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size and signedness selector; narrow transfers need the
	   sign-/zero-extension bit set.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16513
/* V{R}SHR by immediate.  A shift count of zero has no encoding of its
   own and is assembled as the equivalent VMOV.  */
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded as et.size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16534
/* VMOV.F16 <Hd>, <Hm>: half-precision register move, encoded as an
   unconditional (0xf-prefixed) scalar fp16 instruction.  */
static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  do_vfp_sp_monadic ();

  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
16549
/* VMOVL: lengthening move (Qd = extended Dm).  The element size is
   encoded as a single set bit in the imm field.  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
16559
/* VTRN: transpose elements between two vectors.  */
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16569
/* Encode VZIP/VUZP.  The 32-bit D-register form has no encoding of its
   own and is emitted as the equivalent VTRN.32 instead.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16585
/* Encode saturating absolute value / negate (VQABS/VQNEG).  Only signed
   element types are permitted.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16594
/* Encode the pairwise-long operations (the VPADDL/VPADAL family; the
   opcode table supplies the base instruction).  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16604
/* Encode the reciprocal-estimate operations (VRECPE/VRSQRTE).  Bit 8
   distinguishes the floating-point variant from the unsigned-integer
   one.  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16614
/* Encode VCLS (count leading sign bits) — signed element types only.  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16623
/* Encode vector VCLZ (count leading zeros) — integer element types.  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16632
/* Encode VCNT (population count) — only 8-bit elements are valid.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16641
/* Encode VSWP (swap register contents).  Untyped, so no element-type
   check is performed (size -1).  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
16648
/* Encode VTBL/VTBX (table lookup).  The table is a list of 1-4 D
   registers; its length minus one goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  /* Dd in bits [15:12]/22, table base Dn in bits [19:16]/7,
     index Dm in bits [3:0]/5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16672
/* Encode VLDM/VSTM (multiple register load/store).  Single-precision
   lists are delegated to the VFP non-syntax path; here we handle the
   double-precision form, whose offset field counts words (two per
   D register).  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  /* Base register Rn, writeback W bit, first transfer register Dd.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16702
/* Encode VLDR/VSTR via the equivalent VFP mnemonics, warning about
   deprecated or unpredictable uses of PC as the base register.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16739
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  Validates the optional alignment specifier against the
   register list, then looks up the "type" field for bits [11:8] from a
   table indexed by list shape and instruction number.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment (in bits, held in the top half of operand 1's
     immediate) restricts which list lengths are acceptable.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16808
16809 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16810 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16811 otherwise. The variable arguments are a list of pairs of legal (size, align)
16812 values, terminated with -1. */
16813
16814 static int
16815 neon_alignment_bit (int size, int align, int *do_alignment, ...)
16816 {
16817 va_list ap;
16818 int result = FAIL, thissize, thisalign;
16819
16820 if (!inst.operands[1].immisalign)
16821 {
16822 *do_alignment = 0;
16823 return SUCCESS;
16824 }
16825
16826 va_start (ap, do_alignment);
16827
16828 do
16829 {
16830 thissize = va_arg (ap, int);
16831 if (thissize == -1)
16832 break;
16833 thisalign = va_arg (ap, int);
16834
16835 if (size == thissize && align == thisalign)
16836 result = SUCCESS;
16837 }
16838 while (result != SUCCESS);
16839
16840 va_end (ap);
16841
16842 if (result == SUCCESS)
16843 *do_alignment = 1;
16844 else
16845 first_error (_("unsupported alignment for instruction"));
16846
16847 return result;
16848 }
16849
/* Encode single-lane VLD<n>/VST<n> ("lane" form).  Validates list
   length, lane index and register stride, then encodes the per-<n>
   alignment bits, stride bit and lane number.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;	/* <n> minus one.  */
  int max_el = 64 / et.size;		/* Lanes per D register.  */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, alignment) pairs and its
     own placement of the alignment bits.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16934
/* Encode single n-element structure to all lanes VLD<n> instructions
   (the load-and-duplicate form).  Each <n> checks its own legal list
   lengths and (size, alignment) pairs, then encodes the stride bit
   (bit 5) and the size field (bits [7:6]).  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a special size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
17009
17010 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17011 apart from bits [11:4]. */
17012
17013 static void
17014 do_neon_ldx_stx (void)
17015 {
17016 if (inst.operands[1].isreg)
17017 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17018
17019 switch (NEON_LANE (inst.operands[0].imm))
17020 {
17021 case NEON_INTERLEAVE_LANES:
17022 NEON_ENCODE (INTERLV, inst);
17023 do_neon_ld_st_interleave ();
17024 break;
17025
17026 case NEON_ALL_LANES:
17027 NEON_ENCODE (DUP, inst);
17028 if (inst.instruction == N_INV)
17029 {
17030 first_error ("only loads support such operands");
17031 break;
17032 }
17033 do_neon_ld_dup ();
17034 break;
17035
17036 default:
17037 NEON_ENCODE (LANE, inst);
17038 do_neon_ld_st_lane ();
17039 }
17040
17041 /* L bit comes from bit mask. */
17042 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17043 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17044 inst.instruction |= inst.operands[1].reg << 16;
17045
17046 if (inst.operands[1].postind)
17047 {
17048 int postreg = inst.operands[1].imm & 0xf;
17049 constraint (!inst.operands[1].immisreg,
17050 _("post-index must be a register"));
17051 constraint (postreg == 0xd || postreg == 0xf,
17052 _("bad register for post-index"));
17053 inst.instruction |= postreg;
17054 }
17055 else
17056 {
17057 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17058 constraint (inst.reloc.exp.X_op != O_constant
17059 || inst.reloc.exp.X_add_number != 0,
17060 BAD_ADDR_MODE);
17061
17062 if (inst.operands[1].writeback)
17063 {
17064 inst.instruction |= 0xd;
17065 }
17066 else
17067 inst.instruction |= 0xf;
17068 }
17069
17070 if (thumb_mode)
17071 inst.instruction |= 0xf9000000;
17072 else
17073 inst.instruction |= 0xf4000000;
17074 }
17075
/* FP v8.  Shared encoder for the FP v8 three-operand VFP instructions;
   RS selects single, double or half-precision register shapes.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Double-precision variants set bit 8.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Unconditional (0xF condition field) encoding space.  */
  inst.instruction |= 0xf0000000;
}
17104
/* Encode VSEL (conditional select).  Must appear outside an IT block;
   only the scalar VFP shapes are accepted.  */

static void
do_vsel (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
17113
/* Encode VMAXNM/VMINNM: try the scalar VFP form first, otherwise fall
   back to the Neon vector form (which needs the ARMv8 Neon check).  */

static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17127
/* Common encoder for the VRINT family.  MODE selects the rounding
   behaviour; scalar operands take the VFP encoding path and vector
   operands the Neon one, each with its own mode-bit placement.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* First try a VFP (scalar float) interpretation of the operands.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Double-precision variants set bit 8.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7] for the Neon form; the RM
	 (neon_cvt_mode_r) variant has no Neon encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17219
/* Per-mnemonic VRINT entry points; each simply selects the rounding
   mode for the shared encoder do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17261
/* Crypto v1 instructions.  Common encoder for the two-operand (Qd, Qm)
   crypto instructions.  ELTTYPE is the required element type; OP is
   placed at bit 6, or omitted when -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17287
/* Common encoder for the three-operand (Qd, Qn, Qm) crypto
   instructions.  U goes to the U bit; OP selects the size field
   passed to neon_three_same (8 << op).  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17302
/* Per-mnemonic AES and SHA entry points: thin wrappers selecting the
   element type / OP value (two-operand forms) or the U / OP values
   (three-operand forms) for the shared crypto encoders above.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17386
/* Common encoder for the CRC32 instructions.  POLY selects the
   polynomial variant (0: crc32, 1: crc32c) and SZ the operand size
   field; both sit at different bit positions in the ARM and Thumb
   encodings, as do the register fields.  Warns about UNPREDICTABLE
   uses of r15 (and r13 in Thumb).  */

static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17406
/* Per-mnemonic CRC32 entry points: wrappers selecting polynomial
   (crc32 vs. crc32c) and size (byte/halfword/word) for do_crc32_1.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17442
17443 \f
17444 /* Overall per-instruction processing. */
17445
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.

   FRAG/WHERE locate the fixup, SIZE is its width in bytes, EXP the
   expression being fixed up, PC_REL whether it is pc-relative, and
   RELOC the relocation type to apply.  */

static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: wrap the whole expression in a symbol
	 and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17506
/* Create a frag for an instruction requiring relaxation.  The fixed-up
   expression in inst.reloc is decomposed into a symbol and an offset
   for frag_var, and the (initially Thumb-sized) opcode is emitted.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17538
/* Write a 32-bit thumb instruction to buf, as two 16-bit halfwords
   with the most-significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17546
/* Emit the fully-assembled instruction in `inst' to the current frag:
   report any pending error (STR is the source line, for diagnostics),
   hand off relaxable instructions, write the encoding, and create any
   required fixup and debug-line info.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Size zero means nothing to emit (e.g. a pseudo handled entirely
     at assembly time).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* An 8-byte ARM "instruction" is the same 4-byte word written
	 twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17593
17594 static char *
17595 output_it_inst (int cond, int mask, char * to)
17596 {
17597 unsigned long instruction = 0xbf00;
17598
17599 mask &= 0xf;
17600 instruction |= mask;
17601 instruction |= cond << 4;
17602
17603 if (to == NULL)
17604 {
17605 to = frag_more (2);
17606 #ifdef OBJ_ELF
17607 dwarf2_emit_insn (2);
17608 #endif
17609 }
17610
17611 md_number_to_chars (to, instruction, 2);
17612
17613 return to;
17614 }
17615
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a conditional affix may be attached to each mnemonic, and
   drive the lookup algorithm in opcode_lookup below.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17649
17650 /* Subroutine of md_assemble, responsible for looking up the primary
17651 opcode from the mnemonic the user wrote. STR points to the
17652 beginning of the mnemonic.
17653
17654 This is not simply a hash table lookup, because of conditional
17655 variants. Most instructions have conditional variants, which are
17656 expressed with a _conditional affix_ to the mnemonic. If we were
17657 to encode each conditional variant as a literal string in the opcode
17658 table, it would have approximately 20,000 entries.
17659
17660 Most mnemonics take this affix as a suffix, and in unified syntax,
17661 'most' is upgraded to 'all'. However, in the divided syntax, some
17662 instructions take the affix as an infix, notably the s-variants of
17663 the arithmetic instructions. Of those instructions, all but six
17664 have the infix appear after the third character of the mnemonic.
17665
17666 Accordingly, the algorithm for looking up primary opcodes given
17667 an identifier is:
17668
17669 1. Look up the identifier in the opcode table.
17670 If we find a match, go to step U.
17671
17672 2. Look up the last two characters of the identifier in the
17673 conditions table. If we find a match, look up the first N-2
17674 characters of the identifier in the opcode table. If we
17675 find a match, go to step CE.
17676
17677 3. Look up the fourth and fifth characters of the identifier in
17678 the conditions table. If we find a match, extract those
17679 characters from the identifier, and look up the remaining
17680 characters in the opcode table. If we find a match, go
17681 to step CM.
17682
17683 4. Fail.
17684
17685 U. Examine the tag field of the opcode structure, in case this is
17686 one of the six instructions with its conditional infix in an
17687 unusual place. If it is, the tag tells us where to find the
17688 infix; look it up in the conditions table and set inst.cond
17689 accordingly. Otherwise, this is an unconditional instruction.
17690 Again set inst.cond accordingly. Return the opcode structure.
17691
17692 CE. Examine the tag field to make sure this is an instruction that
17693 should receive a conditional suffix. If it is not, fail.
17694 Otherwise, set inst.cond from the suffix we already looked up,
17695 and return the opcode structure.
17696
17697 CM. Examine the tag field to make sure this is an instruction that
17698 should receive a conditional infix after the third character.
17699 If it is not, fail. Otherwise, undo the edits to the current
17700 line of input and proceed as for case CE. */
17701
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  /* Empty mnemonic: nothing to look up.  */
  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      /* Advance past the width suffix (if any) so *str points at either
	 a Neon type suffix or the operands.  */
      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Not one of the six odd-position-infix instructions, so this
	     is an unconditional match.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes where the two-character condition infix lives.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  /* In divided syntax these only take an infix, not a suffix.  */
	  if (!unified_syntax)
	    return 0;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily delete the two infix characters from the input line,
     look up the contracted mnemonic, then restore the line so later
     diagnostics/reparsing see the original text (step CM's "undo").  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17858
17859 /* This function generates an initial IT instruction, leaving its block
17860 virtually open for the new instructions. Eventually,
17861 the mask will be updated by now_it_add_mask () each time
17862 a new instruction needs to be included in the IT block.
17863 Finally, the block is closed with close_automatic_it_block ().
17864 The block closure can be requested either from md_assemble (),
17865 a tencode (), or due to a label hook. */
17866
17867 static void
17868 new_automatic_it_block (int cond)
17869 {
17870 now_it.state = AUTOMATIC_IT_BLOCK;
17871 now_it.mask = 0x18;
17872 now_it.cc = cond;
17873 now_it.block_length = 1;
17874 mapping_state (MAP_THUMB);
17875 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17876 now_it.warn_deprecated = FALSE;
17877 now_it.insn_cond = TRUE;
17878 }
17879
17880 /* Close an automatic IT block.
17881 See comments in new_automatic_it_block (). */
17882
17883 static void
17884 close_automatic_it_block (void)
17885 {
17886 now_it.mask = 0x10;
17887 now_it.block_length = 0;
17888 }
17889
17890 /* Update the mask of the current automatically-generated IT
17891 instruction. See comments in new_automatic_it_block (). */
17892
17893 static void
17894 now_it_add_mask (int cond)
17895 {
17896 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17897 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17898 | ((bitvalue) << (nbit)))
17899 const int resulting_bit = (cond & 1);
17900
17901 now_it.mask &= 0xf;
17902 now_it.mask = SET_BIT_VALUE (now_it.mask,
17903 resulting_bit,
17904 (5 - now_it.block_length));
17905 now_it.mask = SET_BIT_VALUE (now_it.mask,
17906 1,
17907 ((5 - now_it.block_length) - 1) );
17908 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17909
17910 #undef CLEAR_BIT
17911 #undef SET_BIT_VALUE
17912 }
17913
17914 /* The IT blocks handling machinery is accessed through the these functions:
17915 it_fsm_pre_encode () from md_assemble ()
17916 set_it_insn_type () optional, from the tencode functions
17917 set_it_insn_type_last () ditto
17918 in_it_block () ditto
17919 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close ()	from label handling functions
17921
17922 Rationale:
17923 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17924 initializing the IT insn type with a generic initial value depending
17925 on the inst.condition.
17926 2) During the tencode function, two things may happen:
17927 a) The tencode function overrides the IT insn type by
17928 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17929 b) The tencode function queries the IT block state by
17930 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17931
17932 Both set_it_insn_type and in_it_block run the internal FSM state
17933 handling function (handle_it_state), because: a) setting the IT insn
17934 type may incur in an invalid state (exiting the function),
17935 and b) querying the state requires the FSM to be updated.
17936 Specifically we want to avoid creating an IT block for conditional
17937 branches, so it_fsm_pre_encode is actually a guess and we can't
17938 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17940 Because of this, if set_it_insn_type and in_it_block have to be used,
17941 set_it_insn_type has to be called first.
17942
17943 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17944 determines the insn IT type depending on the inst.cond code.
17945 When a tencode () routine encodes an instruction that can be
17946 either outside an IT block, or, in the case of being inside, has to be
17947 the last one, set_it_insn_type_last () will determine the proper
17948 IT instruction type based on the inst.cond code. Otherwise,
17949 set_it_insn_type can be called for overriding that logic or
17950 for covering other cases.
17951
17952 Calling handle_it_state () may not transition the IT block state to
17953 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17954 still queried. Instead, if the FSM determines that the state should
17955 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17956 after the tencode () function: that's what it_fsm_post_encode () does.
17957
17958 Since in_it_block () calls the state handling function to get an
17959 updated state, an error may occur (due to invalid insns combination).
17960 In that case, inst.error is set.
17961 Therefore, inst.error has to be checked after the execution of
17962 the tencode () routine.
17963
17964 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17965 any pending state change (if any) that didn't take place in
17966 handle_it_state () as explained above. */
17967
17968 static void
17969 it_fsm_pre_encode (void)
17970 {
17971 if (inst.cond != COND_ALWAYS)
17972 inst.it_insn_type = INSIDE_IT_INSN;
17973 else
17974 inst.it_insn_type = OUTSIDE_IT_INSN;
17975
17976 now_it.state_handled = 0;
17977 }
17978
17979 /* IT state FSM handling function. */
17980
static int
handle_it_state (void)
{
  /* Mark the state as processed so it_fsm_post_encode ()/in_it_block ()
     do not run the FSM twice for the same instruction.  */
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: a conditional suffix is fine, but warn if the
		 user asked for implicit IT only in Thumb mode.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and implicit IT
		     generation is not enabled (or not possible).  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction starts a user-managed block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and start a
		 new block unless this insn was only conditionally inside
		 (e.g. a conditional branch).  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      /* Extend the current block with this insn's condition.  */
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  /* Insn executes unconditionally but may sit inside the block;
	     its mask bit follows the block's base condition.  */
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and switches to
	     manual mode.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Consume one slot of the manual IT block's mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Suffix does not match the slot's condition.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* e.g. a branch must occupy the final IT slot.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18142
/* One class of instruction encodings deprecated inside an IT block:
   an insn matches the class when (encoding & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits identifying the class.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Class name used in the diagnostic.  */
};

/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18164
18165 static void
18166 it_fsm_post_encode (void)
18167 {
18168 int is_last;
18169
18170 if (!now_it.state_handled)
18171 handle_it_state ();
18172
18173 if (now_it.insn_cond
18174 && !now_it.warn_deprecated
18175 && warn_on_deprecated
18176 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18177 {
18178 if (inst.instruction >= 0x10000)
18179 {
18180 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18181 "deprecated in ARMv8"));
18182 now_it.warn_deprecated = TRUE;
18183 }
18184 else
18185 {
18186 const struct depr_insn_mask *p = depr_it_insns;
18187
18188 while (p->mask != 0)
18189 {
18190 if ((inst.instruction & p->mask) == p->pattern)
18191 {
18192 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18193 "of the following class are deprecated in ARMv8: "
18194 "%s"), p->description);
18195 now_it.warn_deprecated = TRUE;
18196 break;
18197 }
18198
18199 ++p;
18200 }
18201 }
18202
18203 if (now_it.block_length > 1)
18204 {
18205 as_tsktsk (_("IT blocks containing more than one conditional "
18206 "instruction are deprecated in ARMv8"));
18207 now_it.warn_deprecated = TRUE;
18208 }
18209 }
18210
18211 is_last = (now_it.mask == 0x10);
18212 if (is_last)
18213 {
18214 now_it.state = OUTSIDE_IT_BLOCK;
18215 now_it.mask = 0;
18216 }
18217 }
18218
18219 static void
18220 force_automatic_it_block_close (void)
18221 {
18222 if (now_it.state == AUTOMATIC_IT_BLOCK)
18223 {
18224 close_automatic_it_block ();
18225 now_it.state = OUTSIDE_IT_BLOCK;
18226 now_it.mask = 0;
18227 }
18228 }
18229
18230 static int
18231 in_it_block (void)
18232 {
18233 if (!now_it.state_handled)
18234 handle_it_state ();
18235
18236 return now_it.state != OUTSIDE_IT_BLOCK;
18237 }
18238
18239 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18240 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18241 here, hence the "known" in the function name. */
18242
18243 static bfd_boolean
18244 known_t32_only_insn (const struct asm_opcode *opcode)
18245 {
18246 /* Original Thumb-1 wide instruction. */
18247 if (opcode->tencode == do_t_blx
18248 || opcode->tencode == do_t_branch23
18249 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18250 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18251 return TRUE;
18252
18253 /* Wide-only instruction added to ARMv8-M Baseline. */
18254 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18255 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18256 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18257 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18258 return TRUE;
18259
18260 return FALSE;
18261 }
18262
18263 /* Whether wide instruction variant can be used if available for a valid OPCODE
18264 in ARCH. */
18265
18266 static bfd_boolean
18267 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18268 {
18269 if (known_t32_only_insn (opcode))
18270 return TRUE;
18271
18272 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18273 of variant T3 of B.W is checked in do_t_branch. */
18274 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18275 && opcode->tencode == do_t_branch)
18276 return TRUE;
18277
18278 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18279 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18280 && opcode->tencode == do_t_mov_cmp
18281 /* Make sure CMP instruction is not affected. */
18282 && opcode->aencode == do_mov)
18283 return TRUE;
18284
18285 /* Wide instruction variants of all instructions with narrow *and* wide
18286 variants become available with ARMv6t2. Other opcodes are either
18287 narrow-only or wide-only and are thus available if OPCODE is valid. */
18288 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18289 return TRUE;
18290
18291 /* OPCODE with narrow only instruction variant or wide variant not
18292 available. */
18293 return FALSE;
18294 }
18295
/* GAS entry point: assemble the single statement in STR.  Looks up the
   mnemonic, validates it against the selected CPU, parses operands and
   encodes via the opcode's tencode/aencode hook (running the IT-block
   FSM around encoding in Thumb state), then emits the instruction with
   output_inst ().  Errors are reported via as_bad/inst.error.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start each instruction from a clean slate.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	     instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit Thumb encodings must not fall in the 32-bit prefix
	     range 0xe800-0xffff.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18485
18486 static void
18487 check_it_blocks_finished (void)
18488 {
18489 #ifdef OBJ_ELF
18490 asection *sect;
18491
18492 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18493 if (seg_info (sect)->tc_segment_info_data.current_it.state
18494 == MANUAL_IT_BLOCK)
18495 {
18496 as_warn (_("section '%s' finished with an open IT block."),
18497 sect->name);
18498 }
18499 #else
18500 if (now_it.state == MANUAL_IT_BLOCK)
18501 as_warn (_("file finished with an open IT block."));
18502 #endif
18503 }
18504
18505 /* Various frobbings of labels and their addresses. */
18506
/* Per-line hook: forget the last label seen so md_assemble only
   re-anchors a label that immediately precedes the instruction on the
   current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18512
/* Hook run whenever a label SYM is defined.  Records it for
   md_assemble's address fix-up, tags its Thumb/interwork state, closes
   any open automatic IT block, optionally marks it as a Thumb function,
   and emits its DWARF line info.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically generated IT block, since
     control may reach it from elsewhere.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18571
18572 bfd_boolean
18573 arm_data_in_code (void)
18574 {
18575 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18576 {
18577 *input_line_pointer = '/';
18578 input_line_pointer += 5;
18579 *input_line_pointer = 0;
18580 return TRUE;
18581 }
18582
18583 return FALSE;
18584 }
18585
18586 char *
18587 arm_canonicalize_symbol_name (char * name)
18588 {
18589 int len;
18590
18591 if (thumb_mode && (len = strlen (name)) > 5
18592 && streq (name + len - 5, "/data"))
18593 *(name + len - 5) = 0;
18594
18595 return name;
18596 }
18597 \f
18598 /* Table of all register names defined by default. The user can
18599 define additional names with .req. Note that all register names
18600 should appear in both upper and lowercase variants. Some registers
18601 also have mixed-case names. */
18602
/* Build one reg_entry initializer: name S (stringized), register
   number/encoding N, type REG_TYPE_T; TRUE marks it as built-in and the
   trailing 0 leaves the remaining field empty.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register named P<N> with number N (e.g. r5 -> 5).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the number is doubled -- used for registers that map
   onto pairs of a smaller register file.  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half P16..P31 of a 32-entry register file.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers P0..P15 numbered 0,2,4,...,30.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked lr_<bank>/sp_<bank>/spsr_<bank> entries (upper and lower
   case) for one processor mode, encoded from BASE.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18628
/* Master table of every register name the assembler recognises, in all
   accepted spellings (lower/upper/mixed case).  Each entry pairs a name
   with an encoding value and a register type tag (RN, CP, CN, VFS, ...)
   that operand parsing checks against.  */
18629 static const struct reg_entry reg_names[] =
18630 {
18631 /* ARM integer registers. */
18632 REGSET(r, RN), REGSET(R, RN),
18633
18634 /* ATPCS synonyms. */
18635 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
18636 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
18637 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
18638
18639 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
18640 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
18641 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
18642
18643 /* Well-known aliases. */
18644 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
18645 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
18646
18647 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
18648 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
18649
18650 /* Coprocessor numbers. */
18651 REGSET(p, CP), REGSET(P, CP),
18652
18653 /* Coprocessor register numbers. The "cr" variants are for backward
18654 compatibility. */
18655 REGSET(c, CN), REGSET(C, CN),
18656 REGSET(cr, CN), REGSET(CR, CN),
18657
18658 /* ARM banked registers. */
/* Encoding here is the register slot << 16, OR'd with a group tag:
   512 for the usr/fiq bank entries below, 768 for the modes emitted
   via SPLRBANK and the hyp entries.  SPSR_BIT distinguishes SPSRs.
   NOTE(review): these values feed the MRS/MSR banked-register
   encoders; confirm against the ARMv7 Virtualization Extensions
   SYSm encodings before changing any of them.  */
18659 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
18660 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
18661 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
18662 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
18663 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
18664 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
18665 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
18666
18667 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
18668 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
18669 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
18670 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
18671 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
18672 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
18673 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
18674 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
18675
/* LR/SP/SPSR for the remaining banked modes; see SPLRBANK above.  */
18676 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
18677 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
18678 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
18679 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
18680 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
18681 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
18682 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
18683 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
18684 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
18685
18686 /* FPA registers. */
18687 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
18688 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
18689
18690 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
18691 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
18692
18693 /* VFP SP registers. */
18694 REGSET(s,VFS), REGSET(S,VFS),
18695 REGSETH(s,VFS), REGSETH(S,VFS),
18696
18697 /* VFP DP Registers. */
18698 REGSET(d,VFD), REGSET(D,VFD),
18699 /* Extra Neon DP registers. */
18700 REGSETH(d,VFD), REGSETH(D,VFD),
18701
18702 /* Neon QP registers. */
18703 REGSET2(q,NQ), REGSET2(Q,NQ),
18704
18705 /* VFP control registers. */
18706 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
18707 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
18708 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
18709 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
18710 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
18711 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
18712
18713 /* Maverick DSP coprocessor registers. */
18714 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
18715 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
18716
18717 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
18718 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
18719 REGDEF(dspsc,0,DSPSC),
18720
18721 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
18722 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
18723 REGDEF(DSPSC,0,DSPSC),
18724
18725 /* iWMMXt data registers - p0, c0-15. */
18726 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
18727
18728 /* iWMMXt control registers - p1, c0-3. */
18729 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
18730 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
18731 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
18732 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
18733
18734 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18735 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
18736 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
18737 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
18738 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
18739
18740 /* XScale accumulator registers. */
18741 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
18742 };
18743 #undef REGDEF
18744 #undef REGNUM
18745 #undef REGSET
18746
18747 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18748 within psr_required_here. */
/* PSR field-mask suffixes accepted after CPSR/SPSR in MSR operands
   (e.g. "cpsr_fc").  Each entry maps a suffix string to the OR of the
   PSR_f/PSR_s/PSR_x/PSR_c field bits it selects.  */
18749 static const struct asm_psr psrs[] =
18750 {
18751 /* Backward compatibility notation. Note that "all" is no longer
18752 truly all possible PSR bits. */
18753 {"all", PSR_c | PSR_f},
18754 {"flg", PSR_f},
18755 {"ctl", PSR_c},
18756
18757 /* Individual flags. */
18758 {"f", PSR_f},
18759 {"c", PSR_c},
18760 {"x", PSR_x},
18761 {"s", PSR_s},
18762
18763 /* Combinations of flags. */
/* Every permutation of two, three and four of the f/s/x/c letters is
   listed explicitly so that the hash lookup accepts the qualifiers in
   any order the user writes them.  */
18764 {"fs", PSR_f | PSR_s},
18765 {"fx", PSR_f | PSR_x},
18766 {"fc", PSR_f | PSR_c},
18767 {"sf", PSR_s | PSR_f},
18768 {"sx", PSR_s | PSR_x},
18769 {"sc", PSR_s | PSR_c},
18770 {"xf", PSR_x | PSR_f},
18771 {"xs", PSR_x | PSR_s},
18772 {"xc", PSR_x | PSR_c},
18773 {"cf", PSR_c | PSR_f},
18774 {"cs", PSR_c | PSR_s},
18775 {"cx", PSR_c | PSR_x},
18776 {"fsx", PSR_f | PSR_s | PSR_x},
18777 {"fsc", PSR_f | PSR_s | PSR_c},
18778 {"fxs", PSR_f | PSR_x | PSR_s},
18779 {"fxc", PSR_f | PSR_x | PSR_c},
18780 {"fcs", PSR_f | PSR_c | PSR_s},
18781 {"fcx", PSR_f | PSR_c | PSR_x},
18782 {"sfx", PSR_s | PSR_f | PSR_x},
18783 {"sfc", PSR_s | PSR_f | PSR_c},
18784 {"sxf", PSR_s | PSR_x | PSR_f},
18785 {"sxc", PSR_s | PSR_x | PSR_c},
18786 {"scf", PSR_s | PSR_c | PSR_f},
18787 {"scx", PSR_s | PSR_c | PSR_x},
18788 {"xfs", PSR_x | PSR_f | PSR_s},
18789 {"xfc", PSR_x | PSR_f | PSR_c},
18790 {"xsf", PSR_x | PSR_s | PSR_f},
18791 {"xsc", PSR_x | PSR_s | PSR_c},
18792 {"xcf", PSR_x | PSR_c | PSR_f},
18793 {"xcs", PSR_x | PSR_c | PSR_s},
18794 {"cfs", PSR_c | PSR_f | PSR_s},
18795 {"cfx", PSR_c | PSR_f | PSR_x},
18796 {"csf", PSR_c | PSR_s | PSR_f},
18797 {"csx", PSR_c | PSR_s | PSR_x},
18798 {"cxf", PSR_c | PSR_x | PSR_f},
18799 {"cxs", PSR_c | PSR_x | PSR_s},
18800 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18801 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18802 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18803 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18804 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18805 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18806 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18807 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18808 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18809 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18810 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18811 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18812 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18813 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18814 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18815 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18816 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18817 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18818 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18819 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18820 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18821 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18822 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18823 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18824 };
18825
18826 /* Table of V7M psr names. */
/* M-profile special-register names for MRS/MSR, in lower and upper case.
   NOTE(review): the values look like the SYSm encodings from the
   ARMv7-M/ARMv8-M architecture (the _ns variants all carry bit 0x80,
   presumably the Security Extension non-secure alias bit) — confirm
   against the architecture manual before editing.  */
18827 static const struct asm_psr v7m_psrs[] =
18828 {
18829 {"apsr", 0x0 }, {"APSR", 0x0 },
18830 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18831 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18832 {"psr", 0x3 }, {"PSR", 0x3 },
/* The mixed-case spelling "xPSR" is also accepted; 3 == 0x3, the
   same encoding as the entries on the line above.  */
18833 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18834 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18835 {"epsr", 0x6 }, {"EPSR", 0x6 },
18836 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18837 {"msp", 0x8 }, {"MSP", 0x8 },
18838 {"psp", 0x9 }, {"PSP", 0x9 },
18839 {"msplim", 0xa }, {"MSPLIM", 0xa },
18840 {"psplim", 0xb }, {"PSPLIM", 0xb },
18841 {"primask", 0x10}, {"PRIMASK", 0x10},
18842 {"basepri", 0x11}, {"BASEPRI", 0x11},
18843 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18844 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18845 {"control", 0x14}, {"CONTROL", 0x14},
18846 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18847 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18848 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18849 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18850 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18851 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18852 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18853 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18854 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18855 };
18856
18857 /* Table of all shift-in-operand names. */
18858 static const struct asm_shift_name shift_names [] =
18859 {
/* "asl" (arithmetic shift left) is accepted as a synonym for "lsl";
   both map to the same SHIFT_LSL kind.  */
18860 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
18861 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
18862 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
18863 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
18864 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
18865 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
18866 };
18867
18868 /* Table of all explicit relocation names. */
18869 #ifdef OBJ_ELF
/* Explicit relocation specifier names, each in lower and upper case,
   mapped to the BFD relocation code they select.  ELF only.  */
18870 static struct reloc_entry reloc_names[] =
18871 {
18872 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
18873 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
18874 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
18875 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18876 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18877 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
18878 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
18879 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
18880 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
18881 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18882 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
18883 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18884 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18885 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18886 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18887 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18888 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18889 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18890 };
18891 #endif
18892
18893 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18894 static const struct asm_cond conds[] =
18895 {
18896 {"eq", 0x0},
18897 {"ne", 0x1},
/* "hs" (higher-or-same) is a synonym for "cs"; "ul" and "lo" are
   synonyms for "cc".  */
18898 {"cs", 0x2}, {"hs", 0x2},
18899 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18900 {"mi", 0x4},
18901 {"pl", 0x5},
18902 {"vs", 0x6},
18903 {"vc", 0x7},
18904 {"hi", 0x8},
18905 {"ls", 0x9},
18906 {"ge", 0xa},
18907 {"lt", 0xb},
18908 {"gt", 0xc},
18909 {"le", 0xd},
18910 {"al", 0xe}
18911 };
18912
/* UL_BARRIER emits two table entries for one barrier option: the
   lower-case and upper-case spellings, sharing the 4-bit option CODE
   and the architecture feature (FEAT) required to use it.  */
18913 #define UL_BARRIER(L,U,CODE,FEAT) \
18914 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18915 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* DSB/DMB/ISB barrier option names.  The "ld" (load-only) variants
   require ARMv8; the rest only need the barrier extension.  "sh"/"un"
   spellings are older synonyms of "ish"/"nsh".
   NOTE(review): unlike the neighbouring tables this one is not const —
   presumably required by how it is registered at startup; confirm
   before const-qualifying.  */
18917 static struct asm_barrier_opt barrier_opt_names[] =
18918 {
18919 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
18920 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
18921 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
18922 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
18923 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
18924 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
18925 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
18926 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
18927 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
18928 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
18929 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
18930 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
18931 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
18932 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
18933 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
18934 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
18935 };

18937 #undef UL_BARRIER
18938
18939 /* Table of ARM-format instructions. */
18940
18941 /* Macros for gluing together operand strings. N.B. In all cases
18942 other than OPS0, the trailing OP_stop comes from default
18943 zero-initialization of the unspecified elements of the array. */
/* OPSn builds an operand list of n entries, prepending OP_ to each
   argument.  The deliberate trailing comma plus the array's implicit
   zero-initialisation supplies the terminating OP_stop.  */
18944 #define OPS0() { OP_stop, }
18945 #define OPS1(a) { OP_##a, }
18946 #define OPS2(a,b) { OP_##a,OP_##b, }
18947 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18948 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18949 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18950 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

18952 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18953 This is useful when mixing operands for ARM and THUMB, i.e. using the
18954 MIX_ARM_THUMB_OPERANDS macro.
18955 In order to use these macros, prefix the number of operands with _
18956 e.g. _3. */
18957 #define OPS_1(a) { a, }
18958 #define OPS_2(a,b) { a,b, }
18959 #define OPS_3(a,b,c) { a,b,c, }
18960 #define OPS_4(a,b,c,d) { a,b,c,d, }
18961 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18962 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18963
18964 /* These macros abstract out the exact format of the mnemonic table and
18965 save some repeated characters. */
18966
/* Each macro below expands to one (or more) initialisers for the insns[]
   table that follows.  They all reference ARM_VARIANT / THUMB_VARIANT,
   which are #define'd and repeatedly redefined inside insns[] to tag
   each group of entries with the architecture feature it requires.  */

18967 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
18968 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18969 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18970 THUMB_VARIANT, do_##ae, do_##te }

18972 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18973 a T_MNEM_xyz enumerator. */
18974 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18975 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18976 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18977 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

18979 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18980 infix after the third character. */
18981 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18982 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18983 THUMB_VARIANT, do_##ae, do_##te }
18984 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18985 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18986 THUMB_VARIANT, do_##ae, do_##te }
18987 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18988 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18989 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18990 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18991 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18992 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18993 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18994 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

18996 /* Mnemonic that cannot be conditionalized. The ARM condition-code
18997 field is still 0xE. Many of the Thumb variants can be executed
18998 conditionally, so this is checked separately. */
18999 #define TUE(mnem, op, top, nops, ops, ae, te) \
19000 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19001 THUMB_VARIANT, do_##ae, do_##te }

19003 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19004 Used by mnemonics that have very minimal differences in the encoding for
19005 ARM and Thumb variants and can be handled in a common function. */
19006 #define TUEc(mnem, op, top, nops, ops, en) \
19007 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19008 THUMB_VARIANT, do_##en, do_##en }

19010 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19011 condition code field. */
19012 #define TUF(mnem, op, top, nops, ops, ae, te) \
19013 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19014 THUMB_VARIANT, do_##ae, do_##te }

19016 /* ARM-only variants of all the above. */
19017 #define CE(mnem, op, nops, ops, ae) \
19018 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

19020 #define C3(mnem, op, nops, ops, ae) \
19021 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

19023 /* Legacy mnemonics that always have conditional infix after the third
19024 character. */
19025 #define CL(mnem, op, nops, ops, ae) \
19026 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19027 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

19029 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
/* The Thumb opcode is the ARM opcode with the condition field (0xe)
   pasted in front, hence 0xe##op.  */
19030 #define cCE(mnem, op, nops, ops, ae) \
19031 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

19033 /* Legacy coprocessor instructions where conditional infix and conditional
19034 suffix are ambiguous. For consistency this includes all FPA instructions,
19035 not just the potentially ambiguous ones. */
19036 #define cCL(mnem, op, nops, ops, ae) \
19037 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19038 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

19040 /* Coprocessor, takes either a suffix or a position-3 infix
19041 (for an FPA corner case). */
19042 #define C3E(mnem, op, nops, ops, ae) \
19043 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19044 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* xCM_ builds one entry of a conditionally-infixed mnemonic m1<m2>m3,
   where m2 is the condition.  sizeof (#m2) == 1 means m2 stringifies
   to "" (just the NUL), i.e. the bare, uninfixed spelling; otherwise
   the infix position is derived from the length of m1.  */
19046 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19047 { m1 #m2 m3, OPS##nops ops, \
19048 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19049 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* CM expands to one entry for the bare mnemonic plus one per
   condition-code spelling (including the cs/hs, cc/ul/lo synonyms).  */
19051 #define CM(m1, m2, op, nops, ops, ae) \
19052 xCM_ (m1, , m2, op, nops, ops, ae), \
19053 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19054 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19055 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19056 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19057 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19058 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19059 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19060 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19061 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19062 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19063 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19064 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19065 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19066 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19067 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19068 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19069 xCM_ (m1, le, m2, op, nops, ops, ae), \
19070 xCM_ (m1, al, m2, op, nops, ops, ae)

19072 #define UE(mnem, op, nops, ops, ae) \
19073 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

19075 #define UF(mnem, op, nops, ops, ae) \
19076 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

19078 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19079 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19080 use the same encoding function for each. */
19081 #define NUF(mnem, op, nops, ops, enc) \
19082 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19083 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

19085 /* Neon data processing, version which indirects through neon_enc_tab for
19086 the various overloaded versions of opcodes. */
19087 #define nUF(mnem, op, nops, ops, enc) \
19088 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19089 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

19091 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19092 version. */
19093 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19094 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19095 THUMB_VARIANT, do_##enc, do_##enc }

19097 #define NCE(mnem, op, nops, ops, enc) \
19098 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

19100 #define NCEF(mnem, op, nops, ops, enc) \
19101 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

19103 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
19104 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19105 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19106 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

19108 #define nCE(mnem, op, nops, ops, enc) \
19109 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

19111 #define nCEF(mnem, op, nops, ops, enc) \
19112 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets a table entry spell its encoder field as plain `0':
   do_##0 pastes to do_0, which expands to 0 (no encoder function).  */
19114 #define do_0 0
19115
19116 static const struct asm_opcode insns[] =
19117 {
19118 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19119 #define THUMB_VARIANT & arm_ext_v4t
19120 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19121 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19122 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19123 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19124 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19125 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19126 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19127 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19128 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19129 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19130 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19131 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19132 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19133 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19134 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19135 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19136
19137 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19138 for setting PSR flag bits. They are obsolete in V6 and do not
19139 have Thumb equivalents. */
19140 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19141 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19142 CL("tstp", 110f000, 2, (RR, SH), cmp),
19143 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19144 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19145 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19146 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19147 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19148 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19149
19150 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19151 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19152 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19153 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19154
19155 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19156 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19157 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19158 OP_RRnpc),
19159 OP_ADDRGLDR),ldst, t_ldst),
19160 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19161
19162 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19163 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19164 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19165 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19166 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19167 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19168
19169 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19170 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19171 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19172 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19173
19174 /* Pseudo ops. */
19175 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19176 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19177 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19178 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19179
19180 /* Thumb-compatibility pseudo ops. */
19181 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19182 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19183 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19184 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19185 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19186 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19187 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19188 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19189 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19190 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19191 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19192 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19193
19194 /* These may simplify to neg. */
19195 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19196 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19197
19198 #undef THUMB_VARIANT
19199 #define THUMB_VARIANT & arm_ext_v6
19200
19201 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19202
19203 /* V1 instructions with no Thumb analogue prior to V6T2. */
19204 #undef THUMB_VARIANT
19205 #define THUMB_VARIANT & arm_ext_v6t2
19206
19207 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19208 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19209 CL("teqp", 130f000, 2, (RR, SH), cmp),
19210
19211 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19212 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19213 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19214 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19215
19216 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19217 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19218
19219 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19220 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19221
19222 /* V1 instructions with no Thumb analogue at all. */
19223 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19224 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19225
19226 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19227 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19228 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19229 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19230 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19231 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19232 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19233 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19234
19235 #undef ARM_VARIANT
19236 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19237 #undef THUMB_VARIANT
19238 #define THUMB_VARIANT & arm_ext_v4t
19239
19240 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19241 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19242
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_v6t2
19245
19246 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19247 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19248
19249 /* Generic coprocessor instructions. */
19250 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19251 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19252 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19253 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19254 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19255 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19256 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19257
19258 #undef ARM_VARIANT
19259 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19260
19261 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19262 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19263
19264 #undef ARM_VARIANT
19265 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19266 #undef THUMB_VARIANT
19267 #define THUMB_VARIANT & arm_ext_msr
19268
19269 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19270 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19271
19272 #undef ARM_VARIANT
19273 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19274 #undef THUMB_VARIANT
19275 #define THUMB_VARIANT & arm_ext_v6t2
19276
19277 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19278 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19279 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19280 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19281 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19282 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19283 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19284 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19285
19286 #undef ARM_VARIANT
19287 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19288 #undef THUMB_VARIANT
19289 #define THUMB_VARIANT & arm_ext_v4t
19290
19291 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19292 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19293 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19294 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19295 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19296 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19297
19298 #undef ARM_VARIANT
19299 #define ARM_VARIANT & arm_ext_v4t_5
19300
19301 /* ARM Architecture 4T. */
19302 /* Note: bx (and blx) are required on V5, even if the processor does
19303 not support Thumb. */
19304 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19305
19306 #undef ARM_VARIANT
19307 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19308 #undef THUMB_VARIANT
19309 #define THUMB_VARIANT & arm_ext_v5t
19310
19311 /* Note: blx has 2 variants; the .value coded here is for
19312 BLX(2). Only this variant has conditional execution. */
19313 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19314 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19315
19316 #undef THUMB_VARIANT
19317 #define THUMB_VARIANT & arm_ext_v6t2
19318
19319 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19320 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19321 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19322 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19323 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19324 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19325 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19326 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19327
19328 #undef ARM_VARIANT
19329 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19330 #undef THUMB_VARIANT
19331 #define THUMB_VARIANT & arm_ext_v5exp
19332
19333 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19334 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19335 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19336 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19337
19338 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19339 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19340
19341 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19342 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19343 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19344 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19345
19346 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19347 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19348 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19349 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19350
19351 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19352 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19353
19354 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19355 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19356 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19357 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19358
19359 #undef ARM_VARIANT
19360 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19361 #undef THUMB_VARIANT
19362 #define THUMB_VARIANT & arm_ext_v6t2
19363
19364 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19365 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19366 ldrd, t_ldstd),
19367 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19368 ADDRGLDRS), ldrd, t_ldstd),
19369
19370 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19371 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19372
19373 #undef ARM_VARIANT
19374 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19375
19376 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19377
19378 #undef ARM_VARIANT
19379 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19380 #undef THUMB_VARIANT
19381 #define THUMB_VARIANT & arm_ext_v6
19382
19383 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19384 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19385 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19386 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19387 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19388 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19389 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19390 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19391 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19392 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19393
19394 #undef THUMB_VARIANT
19395 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19396
19397 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19398 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19399 strex, t_strex),
19400 #undef THUMB_VARIANT
19401 #define THUMB_VARIANT & arm_ext_v6t2
19402
19403 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19404 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19405
19406 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19407 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19408
19409 /* ARM V6 not included in V7M. */
19410 #undef THUMB_VARIANT
19411 #define THUMB_VARIANT & arm_ext_v6_notm
19412 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19413 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19414 UF(rfeib, 9900a00, 1, (RRw), rfe),
19415 UF(rfeda, 8100a00, 1, (RRw), rfe),
19416 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19417 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19418 UF(rfefa, 8100a00, 1, (RRw), rfe),
19419 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19420 UF(rfeed, 9900a00, 1, (RRw), rfe),
19421 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19422 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19423 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19424 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19425 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19426 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19427 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19428 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19429 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19430 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19431
19432 /* ARM V6 not included in V7M (eg. integer SIMD). */
19433 #undef THUMB_VARIANT
19434 #define THUMB_VARIANT & arm_ext_v6_dsp
19435 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19436 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19437 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19438 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19439 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19440 /* Old name for QASX. */
19441 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19442 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19443 /* Old name for QSAX. */
19444 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19445 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19446 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19447 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19448 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19449 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19450 /* Old name for SASX. */
19451 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19452 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19453 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19454 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19455 /* Old name for SHASX. */
19456 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19457 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19458 /* Old name for SHSAX. */
19459 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19460 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19461 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19462 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19463 /* Old name for SSAX. */
19464 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19465 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19466 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19467 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19468 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19469 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19470 /* Old name for UASX. */
19471 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19472 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19473 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19474 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19475 /* Old name for UHASX. */
19476 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19477 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19478 /* Old name for UHSAX. */
19479 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19480 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19481 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19482 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19483 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19484 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19485 /* Old name for UQASX. */
19486 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19487 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19488 /* Old name for UQSAX. */
19489 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19490 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19491 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19492 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19493 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19494 /* Old name for USAX. */
19495 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19496 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19497 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19498 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19499 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19500 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19501 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19502 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19503 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19504 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19505 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19506 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19507 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19508 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19509 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19510 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19511 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19512 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19513 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19514 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19515 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19516 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19517 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19518 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19519 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19520 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19521 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19522 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19523 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19524 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19525 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19526 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19527 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19528 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19529
19530 #undef ARM_VARIANT
19531 #define ARM_VARIANT & arm_ext_v6k
19532 #undef THUMB_VARIANT
19533 #define THUMB_VARIANT & arm_ext_v6k
19534
19535 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19536 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19537 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19538 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19539
19540 #undef THUMB_VARIANT
19541 #define THUMB_VARIANT & arm_ext_v6_notm
19542 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19543 ldrexd, t_ldrexd),
19544 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19545 RRnpcb), strexd, t_strexd),
19546
19547 #undef THUMB_VARIANT
19548 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19549 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19550 rd_rn, rd_rn),
19551 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19552 rd_rn, rd_rn),
19553 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19554 strex, t_strexbh),
19555 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19556 strex, t_strexbh),
19557 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19558
19559 #undef ARM_VARIANT
19560 #define ARM_VARIANT & arm_ext_sec
19561 #undef THUMB_VARIANT
19562 #define THUMB_VARIANT & arm_ext_sec
19563
19564 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19565
19566 #undef ARM_VARIANT
19567 #define ARM_VARIANT & arm_ext_virt
19568 #undef THUMB_VARIANT
19569 #define THUMB_VARIANT & arm_ext_virt
19570
19571 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19572 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19573
19574 #undef ARM_VARIANT
19575 #define ARM_VARIANT & arm_ext_pan
19576 #undef THUMB_VARIANT
19577 #define THUMB_VARIANT & arm_ext_pan
19578
19579 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19580
19581 #undef ARM_VARIANT
19582 #define ARM_VARIANT & arm_ext_v6t2
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v6t2
19585
19586 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19587 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19588 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19589 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19590
19591 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19592 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19593
19594 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19595 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19596 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19597 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19598
19599 #undef THUMB_VARIANT
19600 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19601 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19602 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19603
19604 /* Thumb-only instructions. */
19605 #undef ARM_VARIANT
19606 #define ARM_VARIANT NULL
19607 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19608 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19609
19610 /* ARM does not really have an IT instruction, so always allow it.
19611 The opcode is copied from Thumb in order to allow warnings in
19612 -mimplicit-it=[never | arm] modes. */
19613 #undef ARM_VARIANT
19614 #define ARM_VARIANT & arm_ext_v1
19615 #undef THUMB_VARIANT
19616 #define THUMB_VARIANT & arm_ext_v6t2
19617
19618 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19619 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19620 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19621 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19622 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19623 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19624 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19625 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19626 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19627 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19628 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19629 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19630 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19631 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19632 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19633 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19634 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19635 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19636
19637 /* Thumb2 only instructions. */
19638 #undef ARM_VARIANT
19639 #define ARM_VARIANT NULL
19640
19641 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19642 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19643 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19644 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19645 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19646 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19647
19648 /* Hardware division instructions. */
19649 #undef ARM_VARIANT
19650 #define ARM_VARIANT & arm_ext_adiv
19651 #undef THUMB_VARIANT
19652 #define THUMB_VARIANT & arm_ext_div
19653
19654 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19655 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19656
19657 /* ARM V6M/V7 instructions. */
19658 #undef ARM_VARIANT
19659 #define ARM_VARIANT & arm_ext_barrier
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_barrier
19662
19663 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19664 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19665 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19666
19667 /* ARM V7 instructions. */
19668 #undef ARM_VARIANT
19669 #define ARM_VARIANT & arm_ext_v7
19670 #undef THUMB_VARIANT
19671 #define THUMB_VARIANT & arm_ext_v7
19672
19673 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19674 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19675
19676 #undef ARM_VARIANT
19677 #define ARM_VARIANT & arm_ext_mp
19678 #undef THUMB_VARIANT
19679 #define THUMB_VARIANT & arm_ext_mp
19680
19681 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19682
19683 /* ARMv8 instructions.  */
19684 #undef ARM_VARIANT
19685 #define ARM_VARIANT & arm_ext_v8
19686
19687 /* Instructions shared between armv8-a and armv8-m. */
19688 #undef THUMB_VARIANT
19689 #define THUMB_VARIANT & arm_ext_atomics
19690
19691 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19692 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19693 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19694 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19695 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19696 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19697 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19698 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19699 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19700 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19701 stlex, t_stlex),
19702 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19703 stlex, t_stlex),
19704 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19705 stlex, t_stlex),
19706 #undef THUMB_VARIANT
19707 #define THUMB_VARIANT & arm_ext_v8
19708
19709 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19710 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19711 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19712 ldrexd, t_ldrexd),
19713 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19714 strexd, t_strexd),
19715 /* ARMv8 T32 only. */
19716 #undef ARM_VARIANT
19717 #define ARM_VARIANT NULL
19718 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19719 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19720 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19721
19722 /* FP for ARMv8. */
19723 #undef ARM_VARIANT
19724 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19725 #undef THUMB_VARIANT
19726 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19727
19728 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19729 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19730 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19731 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19732 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19733 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19734 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19735 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19736 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19737 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19738 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19739 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19740 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19741 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19742 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19743 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19744 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19745
19746 /* Crypto v1 extensions. */
19747 #undef ARM_VARIANT
19748 #define ARM_VARIANT & fpu_crypto_ext_armv8
19749 #undef THUMB_VARIANT
19750 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19751
19752 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19753 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19754 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19755 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19756 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19757 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19758 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19759 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19760 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19761 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19762 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19763 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19764 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19765 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19766
19767 #undef ARM_VARIANT
19768 #define ARM_VARIANT & crc_ext_armv8
19769 #undef THUMB_VARIANT
19770 #define THUMB_VARIANT & crc_ext_armv8
19771 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19772 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19773 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19774 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19775 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19776 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19777
19778 /* ARMv8.2 RAS extension. */
19779 #undef ARM_VARIANT
19780 #define ARM_VARIANT & arm_ext_ras
19781 #undef THUMB_VARIANT
19782 #define THUMB_VARIANT & arm_ext_ras
19783 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19784
19785 #undef ARM_VARIANT
19786 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19787 #undef THUMB_VARIANT
19788 #define THUMB_VARIANT NULL
19789
19790 cCE("wfs", e200110, 1, (RR), rd),
19791 cCE("rfs", e300110, 1, (RR), rd),
19792 cCE("wfc", e400110, 1, (RR), rd),
19793 cCE("rfc", e500110, 1, (RR), rd),
19794
19795 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19796 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19797 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19798 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19799
19800 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19801 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19802 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19803 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19804
19805 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19806 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19807 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19808 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19809 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19810 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19811 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19812 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19813 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19814 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19815 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19816 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19817
19818 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19819 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19820 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19821 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19822 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19823 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19824 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19825 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19826 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19827 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19828 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19829 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19830
19831 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19832 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19833 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19834 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19835 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19836 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19837 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19838 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19839 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19840 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19841 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19842 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19843
19844 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19845 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19846 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19847 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19848 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19849 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19850 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19851 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19852 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19853 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19854 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19855 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19856
19857 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19858 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19859 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19860 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19861 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19862 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19863 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19864 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19865 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19866 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19867 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19868 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19869
19870 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19871 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19872 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19873 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19874 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19875 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19876 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19877 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19878 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19879 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19880 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19881 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19882
19883 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19884 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19885 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19886 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19887 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19888 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19889 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19890 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19891 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19892 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19893 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19894 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19895
19896 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19897 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19898 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19899 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19900 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19901 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19902 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19903 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19904 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19905 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19906 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19907  /* Extended-precision round-towards-zero variant: must be "expez"
19907     (the "expdz" encoding, e7081e0, is already registered above).  */
19907  cCL("expez",	e788160, 2, (RF, RF_IF),	      rd_rm),
19908
19909 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19910 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19911 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19912 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19913 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19914 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19915 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19916 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19917 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19918 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19919 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19920 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19921
19922 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19923 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19924 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19925 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19926 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19927 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19928 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19929 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19930 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19931 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19932 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19933 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19934
19935 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19936 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19937 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19938 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19939 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19940 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19941 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19942 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19943 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19944 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19945 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19946 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19947
19948 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19949 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19950 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19951 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19952 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19953 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19954 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19955 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19956 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19957 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19958 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19959 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19960
19961 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19962 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19963 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19964 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19965 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19966 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19967 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19968 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19969 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19970 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19971 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19972 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19973
19974 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19975 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19976 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19977 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19978 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19979 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19980 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19981 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19982 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19983 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19984 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19985 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19986
19987 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19988 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19989 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19990 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19991 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19992 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19993 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19994 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19995 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19996 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19997 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19998 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19999
20000 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20001 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20002 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20003 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20004 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20005 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20006 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20007 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20008 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20009 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20010 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20011 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20012
20013 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20014 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20015 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20016 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20017 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20018 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20019 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20020 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20021 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20022 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20023 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20024 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20025
20026 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20027 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20028 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20029 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20030 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20031 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20032 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20033 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20034 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20035 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20036 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20037 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20038
20039 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20040 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20041 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20042 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20043 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20044 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20045 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20046 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20047 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20048 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20049 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20050 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20051
20052 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20053 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20054 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20055 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20056 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20057 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20058 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20059 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20060 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20061 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20062 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20063 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20064
20065 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20066 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20067 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20068 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20069 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20070 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20071 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20072 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20073 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20074 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20075 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20076 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20077
20078 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20079 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20080 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20081 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20082 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20083 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20084 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20085 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20086 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20087 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20088 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20089 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20090
20091 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20092 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20093 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20094 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20095 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20096 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20097 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20098 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20099 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20100 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20101 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20102 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20103
20104 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20105 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20106 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20107 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20108 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20109 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20110 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20111 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20112 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20113 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20114 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20115 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20116
20117 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20118 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20119 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20120 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20121 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20122 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20123 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20124 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20125 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20126 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20127 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20128 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20129
20130 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20131 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20132 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20133 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20134 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20135 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20136 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20137 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20138 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20139 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20140 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20141 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20142
20143 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20144 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20145 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20146 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20147 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20148 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20149 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20150 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20152 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20153 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20154 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20155
20156 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20157 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20158 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20159 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20160 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20161 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20162 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20163 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20164 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20165 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20166 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20167 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20168
20169 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20170 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20171 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20172 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20173 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20174 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20175 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20176 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20177 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20178 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20179 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20180 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20181
20182 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20183 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20184 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20185 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20186
20187 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20188 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20189 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20190 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20191 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20192 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20193 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20194 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20195 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20196 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20197 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20198 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20199
20200 /* The implementation of the FIX instruction is broken on some
20201 assemblers, in that it accepts a precision specifier as well as a
20202 rounding specifier, despite the fact that this is meaningless.
20203 To be more compatible, we accept it as well, though of course it
20204 does not set any bits. */
20205 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20206 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20207 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20208 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20209 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20210 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20211 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20212 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20213 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20214 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20215 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20216 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20217 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20218
20219 /* Instructions that were new with the real FPA, call them V2. */
20220 #undef ARM_VARIANT
20221 #define ARM_VARIANT & fpu_fpa_ext_v2
20222
20223 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20224 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20225 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20226 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20227 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20228 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20229
20230 #undef ARM_VARIANT
20231 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20232
20233 /* Moves and type conversions. */
20234 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20235 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20236 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20237 cCE("fmstat", ef1fa10, 0, (), noargs),
20238 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20239 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20240 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20241 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20242 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20243 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20244 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20245 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20246 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20247 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20248
20249 /* Memory operations. */
20250 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20251 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20252 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20253 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20254 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20255 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20256 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20257 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20258 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20259 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20260 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20261 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20262 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20263 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20264 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20265 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20266 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20267 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20268
20269 /* Monadic operations. */
20270 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20271 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20272 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20273
20274 /* Dyadic operations. */
20275 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20276 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20277 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20278 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20279 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20280 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20281 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20282 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20283 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20284
20285 /* Comparisons. */
20286 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20287 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20288 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20289 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20290
20291 /* Double precision load/store are still present on single precision
20292 implementations. */
20293 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20294 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20295 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20296 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20297 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20298 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20299 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20300 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20301 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20302 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20303
20304 #undef ARM_VARIANT
20305 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20306
20307 /* Moves and type conversions. */
20308 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20309 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20310 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20311 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20312 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20313 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20314 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20315 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20316 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20317 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20318 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20319 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20320 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20321
20322 /* Monadic operations. */
20323 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20324 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20325 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20326
20327 /* Dyadic operations. */
20328 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20329 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20330 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20331 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20332 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20333 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20334 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20335 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20336 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20337
20338 /* Comparisons. */
20339 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20340 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20341 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20342 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20343
20344 #undef ARM_VARIANT
20345 #define ARM_VARIANT & fpu_vfp_ext_v2
20346
20347 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20348 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20349 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20350 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20351
20352 /* Instructions which may belong to either the Neon or VFP instruction sets.
20353 Individual encoder functions perform additional architecture checks. */
20354 #undef ARM_VARIANT
20355 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20356 #undef THUMB_VARIANT
20357 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20358
20359 /* These mnemonics are unique to VFP. */
20360 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20361 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20362 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20363 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20364 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20365 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20366 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20367 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20368 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20369 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20370
20371 /* Mnemonics shared by Neon and VFP. */
20372 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20373 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20374 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20375
20376 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20377 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20378
20379 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20380 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20381
20382 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20383 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20384 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20385 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20386 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20387 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20388 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20389 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20390
20391 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20392 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20393 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20394 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20395
20396
20397 /* NOTE: All VMOV encoding is special-cased! */
20398 NCE(vmov, 0, 1, (VMOV), neon_mov),
20399 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20400
20401 #undef ARM_VARIANT
20402 #define ARM_VARIANT & arm_ext_fp16
20403 #undef THUMB_VARIANT
20404 #define THUMB_VARIANT & arm_ext_fp16
20405 /* New instructions added from v8.2, allowing the extraction and insertion of
20406 the upper 16 bits of a 32-bit vector register. */
20407 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20408 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20409
20410 #undef THUMB_VARIANT
20411 #define THUMB_VARIANT & fpu_neon_ext_v1
20412 #undef ARM_VARIANT
20413 #define ARM_VARIANT & fpu_neon_ext_v1
20414
20415 /* Data processing with three registers of the same length. */
20416 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20417 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20418 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20419 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20420 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20421 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20422 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20423 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20424 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20425 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20426 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20427 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20428 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20429 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20430 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20431 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20432 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20433 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20434 /* If not immediate, fall back to neon_dyadic_i64_su.
20435 shl_imm should accept I8 I16 I32 I64,
20436 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20437 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20438 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20439 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20440 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20441 /* Logic ops, types optional & ignored. */
20442 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20443 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20444 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20445 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20446 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20447 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20448 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20449 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20450 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20451 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20452 /* Bitfield ops, untyped. */
20453 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20454 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20455 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20456 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20457 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20458 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20459 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20460 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20461 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20462 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20463 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20464 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20465 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20466 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20467 back to neon_dyadic_if_su. */
20468 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20469 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20470 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20471 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20472 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20473 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20474 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20475 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20476 /* Comparison. Type I8 I16 I32 F32. */
20477 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20478 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20479 /* As above, D registers only. */
20480 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20481 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20482 /* Int and float variants, signedness unimportant. */
20483 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20484 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20485 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20486 /* Add/sub take types I8 I16 I32 I64 F32. */
20487 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20488 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20489 /* vtst takes sizes 8, 16, 32. */
20490 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20491 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20492 /* VMUL takes I8 I16 I32 F32 P8. */
20493 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20494 /* VQD{R}MULH takes S16 S32. */
20495 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20496 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20497 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20498 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20499 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20500 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20501 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20502 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20503 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20504 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20505 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20506 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20507 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20508 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20509 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20510 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20511 /* ARM v8.1 extension. */
20512 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20513 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20514 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20515 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20516
20517 /* Two address, int/float. Types S8 S16 S32 F32. */
20518 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20519 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20520
20521 /* Data processing with two registers and a shift amount. */
20522 /* Right shifts, and variants with rounding.
20523 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20524 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20525 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20526 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20527 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20528 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20529 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20530 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20531 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20532 /* Shift and insert. Sizes accepted 8 16 32 64. */
20533 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20534 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20535 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20536 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20537 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20538 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20539 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20540 /* Right shift immediate, saturating & narrowing, with rounding variants.
20541 Types accepted S16 S32 S64 U16 U32 U64. */
20542 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20543 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20544 /* As above, unsigned. Types accepted S16 S32 S64. */
20545 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20546 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20547 /* Right shift narrowing. Types accepted I16 I32 I64. */
20548 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20549 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20550 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20551 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20552 /* CVT with optional immediate for fixed-point variant. */
20553 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20554
20555 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20556 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20557
20558 /* Data processing, three registers of different lengths. */
20559 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20560 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20561 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20562 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20563 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20564 /* If not scalar, fall back to neon_dyadic_long.
20565 Vector types as above, scalar types S16 S32 U16 U32. */
20566 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20567 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20568 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20569 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20570 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20571 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20572 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20573 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20574 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20575 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20576 /* Saturating doubling multiplies. Types S16 S32. */
20577 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20578 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20579 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20580 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20581 S16 S32 U16 U32. */
20582 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20583
20584 /* Extract. Size 8. */
20585 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20586 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20587
20588 /* Two registers, miscellaneous. */
20589 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20590 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20591 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20592 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20593 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20594 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20595 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20596 /* Vector replicate. Sizes 8 16 32. */
20597 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20598 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20599 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20600 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20601 /* VMOVN. Types I16 I32 I64. */
20602 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20603 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20604 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20605 /* VQMOVUN. Types S16 S32 S64. */
20606 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20607 /* VZIP / VUZP. Sizes 8 16 32. */
20608 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20609 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20610 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20611 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20612 /* VQABS / VQNEG. Types S8 S16 S32. */
20613 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20614 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20615 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20616 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20617 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20618 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20619 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20620 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20621 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20622 /* Reciprocal estimates. Types U32 F16 F32. */
20623 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20624 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20625 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20626 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20627 /* VCLS. Types S8 S16 S32. */
20628 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20629 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20630 /* VCLZ. Types I8 I16 I32. */
20631 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20632 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20633 /* VCNT. Size 8. */
20634 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20635 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20636 /* Two address, untyped. */
20637 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20638 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20639 /* VTRN. Sizes 8 16 32. */
20640 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20641 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20642
20643 /* Table lookup. Size 8. */
20644 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20645 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20646
20647 #undef THUMB_VARIANT
20648 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20649 #undef ARM_VARIANT
20650 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20651
20652 /* Neon element/structure load/store. */
20653 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20654 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20655 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20656 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20657 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20658 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20659 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20660 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20661
20662 #undef THUMB_VARIANT
20663 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20664 #undef ARM_VARIANT
20665 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20666 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20667 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20668 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20669 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20670 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20671 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20672 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20673 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20674 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20675
20676 #undef THUMB_VARIANT
20677 #define THUMB_VARIANT & fpu_vfp_ext_v3
20678 #undef ARM_VARIANT
20679 #define ARM_VARIANT & fpu_vfp_ext_v3
20680
20681 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20682 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20683 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20684 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20685 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20686 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20687 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20688 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20689 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20690
20691 #undef ARM_VARIANT
20692 #define ARM_VARIANT & fpu_vfp_ext_fma
20693 #undef THUMB_VARIANT
20694 #define THUMB_VARIANT & fpu_vfp_ext_fma
20695 /* Mnemonics shared by Neon and VFP. These are included in the
20696 VFP FMA variant; NEON and VFP FMA always includes the NEON
20697 FMA instructions. */
20698 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20699 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20700 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20701 the v form should always be used. */
20702 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20703 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20704 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20705 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20706 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20707 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20708
20709 #undef THUMB_VARIANT
20710 #undef ARM_VARIANT
20711 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20712
20713 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20714 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20715 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20716 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20717 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20718 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20719 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20720 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20721
20722 #undef ARM_VARIANT
20723 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20724
20725 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20726 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20727 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20728 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20729 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20730 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20731 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20732 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20733 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20734 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20735 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20736 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20737 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20738 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20739 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20740 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20741 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20742 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20743 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20744 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20745 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20746 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20747 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20748 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20749 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20750 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20751 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20752 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20753 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20754 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20755 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20756 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20757 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20758 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20759 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20760 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20761 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20762 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20763 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20764 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20765 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20766 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20767 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20768 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20769 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20770 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20771 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20772 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20773 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20774 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20775 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20776 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20777 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20778 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20779 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20780 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20781 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20782 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20783 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20784 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20785 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20786 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20787 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20788 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20789 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20790 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20791 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20792 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20793 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20794 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20795 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20796 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20797 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20798 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20799 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20800 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20801 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20802 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20803 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20804 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20805 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20806 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20807 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20808 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20809 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20810 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20811 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20812 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20813 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20814 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20815 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20816 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20817 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20818 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20819 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20820 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20821 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20822 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20823 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20824 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20825 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20826 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20827 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20828 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20829 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20830 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20831 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20832 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20833 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20834 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20835 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20836 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20837 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20838 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20839 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20840 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20841 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20842 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20843 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20844 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20845 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20846 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20847 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20848 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20849 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20850 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20851 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20852 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20853 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20854 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20855 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20856 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20857 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20858 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20859 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20860 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20861 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20862 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20863 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20864 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20865 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20866 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20867 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20868 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20869 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20870 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20871 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20872 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20873 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20874 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20875 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20876 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20877 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20878 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20879 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20880 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20881 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20882 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20883 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20884 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20885 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20886 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20887
20888 #undef ARM_VARIANT
20889 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20890
20891 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20892 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20893 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20894 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20895 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20896 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20897 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20898 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20899 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20900 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20901 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20902 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20903 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20904 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20905 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20906 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20907 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20908 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20909 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20910 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20911 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20912 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20913 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20914 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20915 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20916 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20917 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20918 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20919 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20920 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20921 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20922 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20923 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20924 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20925 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20926 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20927 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20928 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20929 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20930 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20931 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20932 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20933 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20934 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20935 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20936 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20937 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20938 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20939 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20940 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20941 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20942 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20943 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20944 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20945 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20946 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20947 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20948
20949 #undef ARM_VARIANT
20950 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20951
20952 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20953 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20954 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20955 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20956 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20957 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20958 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20959 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20960 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20961 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20962 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20963 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20964 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20965 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20966 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20967 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20968 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20969 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20970 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20971 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20972 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20973 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20974 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20975 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20976 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20977 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20978 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20979 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20980 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20981 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20982 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20983 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20984 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20985 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20986 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20987 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20988 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20989 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20990 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20991 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20992 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20993 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20994 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20995 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20996 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20997 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20998 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20999 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21000 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21001 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21002 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21003 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21004 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21005 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21006 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21007 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21008 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21009 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21010 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21011 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21012 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21013 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21014 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21015 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21016 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21017 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21018 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21019 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21020 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21021 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21022 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21023 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21024 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21025 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21026 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21027 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21028
21029 /* ARMv8-M instructions. */
21030 #undef ARM_VARIANT
21031 #define ARM_VARIANT NULL
21032 #undef THUMB_VARIANT
21033 #define THUMB_VARIANT & arm_ext_v8m
21034 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21035 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21036 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21037 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21038 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21039 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21040 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21041
21042 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21043 instructions behave as nop if no VFP is present. */
21044 #undef THUMB_VARIANT
21045 #define THUMB_VARIANT & arm_ext_v8m_main
21046 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21047 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21048 };
21049 #undef ARM_VARIANT
21050 #undef THUMB_VARIANT
21051 #undef TCE
21052 #undef TUE
21053 #undef TUF
21054 #undef TCC
21055 #undef cCE
21056 #undef cCL
21057 #undef C3E
21058 #undef CE
21059 #undef CM
21060 #undef UE
21061 #undef UF
21062 #undef UT
21063 #undef NUF
21064 #undef nUF
21065 #undef NCE
21066 #undef nCE
21067 #undef OPS0
21068 #undef OPS1
21069 #undef OPS2
21070 #undef OPS3
21071 #undef OPS4
21072 #undef OPS5
21073 #undef OPS6
21074 #undef do_0
21075 \f
21076 /* MD interface: bits in the object file. */
21077
21078 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21079 for use in the a.out file, and stores them in the array pointed to by buf.
21080 This knows about the endian-ness of the target machine and does
21081 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21082 2 (short) and 4 (long) Floating numbers are put out as a series of
21083 LITTLENUMS (shorts, here at least). */
21084
21085 void
21086 md_number_to_chars (char * buf, valueT val, int n)
21087 {
21088 if (target_big_endian)
21089 number_to_chars_bigendian (buf, val, n);
21090 else
21091 number_to_chars_littleendian (buf, val, n);
21092 }
21093
21094 static valueT
21095 md_chars_to_number (char * buf, int n)
21096 {
21097 valueT result = 0;
21098 unsigned char * where = (unsigned char *) buf;
21099
21100 if (target_big_endian)
21101 {
21102 while (n--)
21103 {
21104 result <<= 8;
21105 result |= (*where++ & 255);
21106 }
21107 }
21108 else
21109 {
21110 while (n--)
21111 {
21112 result <<= 8;
21113 result |= (where[n] & 255);
21114 }
21115 }
21116
21117 return result;
21118 }
21119
21120 /* MD interface: Sections. */
21121
21122 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21123 that an rs_machine_dependent frag may reach. */
21124
21125 unsigned int
21126 arm_frag_max_var (fragS *fragp)
21127 {
21128 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21129 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21130
21131 Note that we generate relaxable instructions even for cases that don't
21132 really need it, like an immediate that's a trivial constant. So we're
21133 overestimating the instruction size for some of those cases. Rather
21134 than putting more intelligence here, it would probably be better to
21135 avoid generating a relaxation frag in the first place when it can be
21136 determined up front that a short instruction will suffice. */
21137
21138 gas_assert (fragp->fr_type == rs_machine_dependent);
21139 return INSN_SIZE;
21140 }
21141
21142 /* Estimate the size of a frag before relaxing. Assume everything fits in
21143 2 bytes. */
21144
21145 int
21146 md_estimate_size_before_relax (fragS * fragp,
21147 segT segtype ATTRIBUTE_UNUSED)
21148 {
21149 fragp->fr_var = 2;
21150 return 2;
21151 }
21152
/* Convert a machine dependent frag.  */

/* Finalize a relaxed Thumb frag: if relaxation chose the 32-bit form
   (fr_var == 4), rewrite the 16-bit instruction already in the frag as
   its Thumb-2 equivalent, then emit the appropriate fixup for the
   operand.  ABFD is the output BFD (used to read the old opcode with
   the right endianness); ASEC is unused; FRAGP is the frag to convert.  */
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the narrow instruction emitted earlier; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow encodings with top nibble 4 or 9 (PC/SP-relative
	     forms) keep Rt in bits 8-10; the register-offset forms keep
	     Rt in bits 0-2 and Rn in bits 3-5.  Move them into the
	     Thumb-2 field positions.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the pseudo "ldr Rd, =literal" form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR implicitly adds the PC pipeline offset; fold the
	     compensation into the addend here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put Rd in bits 8-10 of the old opcode mapping to
	     bit 8 of the new; cmp/cmn instead supply Rn at bit 16.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field over into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting (S) variants, which
	     need the add-immediate reloc rather than the modified
	     immediate one.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21326
21327 /* Return the size of a relaxable immediate operand instruction.
21328 SHIFT and SIZE specify the form of the allowable immediate. */
21329 static int
21330 relax_immediate (fragS *fragp, int size, int shift)
21331 {
21332 offsetT offset;
21333 offsetT mask;
21334 offsetT low;
21335
21336 /* ??? Should be able to do better than this. */
21337 if (fragp->fr_symbol)
21338 return 4;
21339
21340 low = (1 << shift) - 1;
21341 mask = (1 << (shift + size)) - (1 << shift);
21342 offset = fragp->fr_offset;
21343 /* Force misaligned offsets to 32-bit variant. */
21344 if (offset & low)
21345 return 4;
21346 if (offset & ~mask)
21347 return 4;
21348 return 2;
21349 }
21350
/* Get the address of a symbol during relaxation.  */

/* Returns the estimated address of FRAGP's symbol (plus fr_offset) for
   the current relaxation pass.  STRETCH is the cumulative size change
   so far on this pass; frags not yet visited are assumed to move by the
   same amount, with alignment frags in between absorbing part of it.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* relax_marker distinguishes frags already updated on this pass from
     those still carrying last pass's addresses.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch toward zero to the alignment
		 boundary: padding inside the align frag soaks up the
		 remainder.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means the symbol's frag precedes ours, so it has
	 already been placed and needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21400
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

/* Returns 2 if the narrow (16-bit) form suffices, else 4.  SEC is the
   section containing FRAGP; STRETCH is the current pass's cumulative
   size change (see relaxed_symbol_addr).  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The narrow form computes from Align(PC, 4) after the pipeline
     offset, hence round (addr + 4) down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* Narrow adr/ldr-literal reach: unsigned, word-scaled 8-bit field.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
21427
21428 /* Return the size of a relaxable add/sub immediate instruction. */
21429 static int
21430 relax_addsub (fragS *fragp, asection *sec)
21431 {
21432 char *buf;
21433 int op;
21434
21435 buf = fragp->fr_literal + fragp->fr_fix;
21436 op = bfd_get_16(sec->owner, buf);
21437 if ((op & 0xf) == ((op >> 4) & 0xf))
21438 return relax_immediate (fragp, 8, 0);
21439 else
21440 return relax_immediate (fragp, 3, 0);
21441 }
21442
21443 /* Return TRUE iff the definition of symbol S could be pre-empted
21444 (overridden) at link or load time. */
21445 static bfd_boolean
21446 symbol_preemptible (symbolS *s)
21447 {
21448 /* Weak symbols can always be pre-empted. */
21449 if (S_IS_WEAK (s))
21450 return TRUE;
21451
21452 /* Non-global symbols cannot be pre-empted. */
21453 if (! S_IS_EXTERNAL (s))
21454 return FALSE;
21455
21456 #ifdef OBJ_ELF
21457 /* In ELF, a global symbol can be marked protected, or private. In that
21458 case it can't be pre-empted (other definitions in the same link unit
21459 would violate the ODR). */
21460 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21461 return FALSE;
21462 #endif
21463
21464 /* Other global symbols might be pre-empted. */
21465 return TRUE;
21466 }
21467
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

/* Returns 2 when the narrow form reaches the target, 4 otherwise.
   SEC is the section containing FRAGP and STRETCH the current pass's
   cumulative size change (see relaxed_symbol_addr).  */
static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A preemptible target may be resolved elsewhere at link time, so the
     linker must see the full-range form.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Thumb branches are relative to PC = insn address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
21504
21505
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

/* SEC is the frag's section, FRAGP the frag, STRETCH the cumulative
   size change so far on this pass.  The relax_immediate arguments are
   (frag, field-width-in-bits, offset-scale-shift) per the narrow
   encoding of each mnemonic.  */
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21584
21585 /* Round up a section size to the appropriate boundary. */
21586
21587 valueT
21588 md_section_align (segT segment ATTRIBUTE_UNUSED,
21589 valueT size)
21590 {
21591 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21592 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21593 {
21594 /* For a.out, force the section size to be aligned. If we don't do
21595 this, BFD will align it for us, but it will not write out the
21596 final bytes of the section. This may be a bug in BFD, but it is
21597 easier to fix it here since that is how the other a.out targets
21598 work. */
21599 int align;
21600
21601 align = bfd_get_section_alignment (stdoutput, segment);
21602 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21603 }
21604 #endif
21605
21606 return size;
21607 }
21608
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

/* Pads the alignment gap with no-op instructions appropriate to the
   frag's recorded ARM/Thumb mode, the selected architecture, and the
   target endianness.  Sub-instruction residue is zero-filled (and, for
   ELF, covered by a $d mapping symbol).  */
void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1: MOV r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k: architected NOP.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1: MOV r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2: architected NOP.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: pad mostly with 32-bit NOPs, using at most one
	     16-bit NOP to fix up an odd halfword count.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Bytes that cannot hold a whole no-op are zero-filled first.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21728
21729 /* Called from md_do_align. Used to create an alignment
21730 frag in a code section. */
21731
21732 void
21733 arm_frag_align_code (int n, int max)
21734 {
21735 char * p;
21736
21737 /* We assume that there will never be a requirement
21738 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21739 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21740 {
21741 char err_msg[128];
21742
21743 sprintf (err_msg,
21744 _("alignments greater than %d bytes not supported in .text sections."),
21745 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21746 as_fatal ("%s", err_msg);
21747 }
21748
21749 p = frag_var (rs_align_code,
21750 MAX_MEM_FOR_RS_ALIGN_CODE,
21751 1,
21752 (relax_substateT) max,
21753 (symbolS *) NULL,
21754 (offsetT) n,
21755 (char *) NULL);
21756 *p = 0;
21757 }
21758
21759 /* Perform target specific initialisation of a frag.
21760 Note - despite the name this initialisation is not done when the frag
21761 is created, but only when its type is assigned. A frag can be created
21762 and used a long time before its type is set, so beware of assuming that
21763 this initialisationis performed first. */
21764
21765 #ifndef OBJ_ELF
/* Perform target specific initialisation of a frag (non-ELF variant:
   no mapping symbols are needed, only the ARM/Thumb mode record).  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21772
21773 #else /* OBJ_ELF is defined. */
/* ELF variant: besides recording the ARM/Thumb mode, emit a mapping
   symbol ($a/$t/$d) appropriate to the frag type.  MAX_CHARS is the
   frag's maximum growth, forwarded to mapping_state_2.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  int frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Strip the MODE_RECORDED bit, leaving just the recorded mode.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21802
/* When we change sections we need to issue a new mapping symbol.  */

/* Hook run on section changes: ensure an .ARM.exidx section created
   without an explicit link is tied to .text so its sh_link is valid.  */
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
21813
21814 int
21815 arm_elf_section_type (const char * str, size_t len)
21816 {
21817 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21818 return SHT_ARM_EXIDX;
21819
21820 return -1;
21821 }
21822 \f
21823 /* Code to deal with unwinding tables. */
21824
21825 static void add_unwind_adjustsp (offsetT);
21826
21827 /* Generate any deferred unwind frame offset. */
21828
21829 static void
21830 flush_pending_unwind (void)
21831 {
21832 offsetT offset;
21833
21834 offset = unwind.pending_offset;
21835 unwind.pending_offset = 0;
21836 if (offset != 0)
21837 add_unwind_adjustsp (offset);
21838 }
21839
21840 /* Add an opcode to this list for this function. Two-byte opcodes should
21841 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21842 order. */
21843
21844 static void
21845 add_unwind_opcode (valueT op, int length)
21846 {
21847 /* Add any deferred stack adjustment. */
21848 if (unwind.pending_offset)
21849 flush_pending_unwind ();
21850
21851 unwind.sp_restored = 0;
21852
21853 if (unwind.opcode_count + length > unwind.opcode_alloc)
21854 {
21855 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21856 if (unwind.opcodes)
21857 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
21858 unwind.opcode_alloc);
21859 else
21860 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
21861 }
21862 while (length > 0)
21863 {
21864 length--;
21865 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21866 op >>= 8;
21867 unwind.opcode_count++;
21868 }
21869 }
21870
/* Add unwind opcodes to adjust the stack pointer.  */

/* OFFSET is the byte adjustment to sp (positive grows the frame).  The
   encodings used are the EHABI vsp ops: 0x00-0x3f (vsp += imm*4 + 4),
   0x40-0x7f (vsp -= imm*4 + 4), 0x3f+extra for 0x104..0x200, and the
   long form 0xb2 followed by a uleb128 for larger increments.  Note the
   opcode list is built in reverse, so multi-byte sequences are emitted
   back to front.  */
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Set the continuation bit on all but the last byte.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      /* Emit uleb bytes last-first, then the 0xb2 opcode, so that after
	 the final reversal they read 0xb2, uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      /* A maximal "vsp += 0x100" op (0x3f) plus a second op covering
	 the remainder; emitted in reverse.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      /* Decrements have no long form: chain maximal 0x7f ops, then one
	 final op for the remainder.  */
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21932
21933 /* Finish the list of unwind opcodes for this function. */
21934 static void
21935 finish_unwind_opcodes (void)
21936 {
21937 valueT op;
21938
21939 if (unwind.fp_used)
21940 {
21941 /* Adjust sp as necessary. */
21942 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21943 flush_pending_unwind ();
21944
21945 /* After restoring sp from the frame pointer. */
21946 op = 0x90 | unwind.fp_reg;
21947 add_unwind_opcode (op, 1);
21948 }
21949 else
21950 flush_pending_unwind ();
21951 }
21952
21953
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

/* Switches to (creating if necessary) the unwind section paired with
   TEXT_SEG: .ARM.exidx* when IDX is nonzero, .ARM.extab* otherwise.
   The section name mirrors the text section's suffix, linkonce and
   COMDAT-group properties are propagated, and index-table sections get
   their sh_link set back to TEXT_SEG.  */
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* Plain ".text" gets no suffix on the unwind section name.  */
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22020
22021
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

/* Emits the extab-side unwind data for the current function into the
   unwind section, packing the accumulated (reversed) opcode list into
   big-endian words.  Returns 1 for EXIDX_CANTUNWIND, a word with bit 31
   set for a fully inline entry (personality 0, <= 3 opcodes, no extra
   data), or 0 when a table entry was written and unwind.table_entry
   points at it.  */
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means ".cantunwind" was given.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      /* 0x80 in the top byte marks an inline exidx entry; up to
		 three opcodes fill the remaining bytes.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the opcode byte count up to whole words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  /* Remember where this entry starts so the exidx entry can point at
     it.  */
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22190
22191
/* Initialize the DWARF-2 unwind information for this procedure.  */

/* The initial CFA is simply sp + 0 on function entry.  */
void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22199 #endif /* OBJ_ELF */
22200
22201 /* Convert REGNAME to a DWARF-2 register number. */
22202
22203 int
22204 tc_arm_regname_to_dw2regnum (char *regname)
22205 {
22206 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22207 if (reg != FAIL)
22208 return reg;
22209
22210 /* PR 16694: Allow VFP registers as well. */
22211 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22212 if (reg != FAIL)
22213 return 64 + reg;
22214
22215 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22216 if (reg != FAIL)
22217 return reg + 256;
22218
22219 return -1;
22220 }
22221
22222 #ifdef TE_PE
22223 void
22224 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22225 {
22226 expressionS exp;
22227
22228 exp.X_op = O_secrel;
22229 exp.X_add_symbol = symbol;
22230 exp.X_add_number = 0;
22231 emit_expr (&exp, size);
22232 }
22233 #endif
22234
22235 /* MD interface: Symbol and relocation handling. */
22236
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a same-section call to an ARM-state function
	 on v5T+, the raw base is reinstated even though base may have
	 been zeroed above — presumably because the linker will convert
	 the BL to BLX and needs the full offset; confirm against
	 bfd/elf32-arm.c.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22364
22365 static bfd_boolean flag_warn_syms = TRUE;
22366
22367 bfd_boolean
22368 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22369 {
22370 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22371 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22372 does mean that the resulting code might be very confusing to the reader.
22373 Also this warning can be triggered if the user omits an operand before
22374 an immediate address, eg:
22375
22376 LDR =foo
22377
22378 GAS treats this as an assignment of the value of the symbol foo to a
22379 symbol LDR, and so (without this code) it will not issue any kind of
22380 warning or error message.
22381
22382 Note - ARM instructions are case-insensitive but the strings in the hash
22383 table are all stored in lower case, so we must first ensure that name is
22384 lower case too. */
22385 if (flag_warn_syms && arm_ops_hsh)
22386 {
22387 char * nbuf = strdup (name);
22388 char * p;
22389
22390 for (p = nbuf; *p; p++)
22391 *p = TOLOWER (*p);
22392 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22393 {
22394 static struct hash_control * already_warned = NULL;
22395
22396 if (already_warned == NULL)
22397 already_warned = hash_new ();
22398 /* Only warn about the symbol once. To keep the code
22399 simple we let hash_insert do the lookup for us. */
22400 if (hash_insert (already_warned, name, NULL) == NULL)
22401 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22402 }
22403 else
22404 free (nbuf);
22405 }
22406
22407 return FALSE;
22408 }
22409
22410 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22411 Otherwise we have no need to default values of symbols. */
22412
22413 symbolS *
22414 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22415 {
22416 #ifdef OBJ_ELF
22417 if (name[0] == '_' && name[1] == 'G'
22418 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22419 {
22420 if (!GOT_symbol)
22421 {
22422 if (symbol_find (name))
22423 as_bad (_("GOT already in the symbol table"));
22424
22425 GOT_symbol = symbol_new (name, undefined_section,
22426 (valueT) 0, & zero_address_frag);
22427 }
22428
22429 return GOT_symbol;
22430 }
22431 #endif
22432
22433 return NULL;
22434 }
22435
22436 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22437 computed as two separate immediate values, added together. We
22438 already know that this value cannot be computed by just one ARM
22439 instruction. */
22440
22441 static unsigned int
22442 validate_immediate_twopart (unsigned int val,
22443 unsigned int * highpart)
22444 {
22445 unsigned int a;
22446 unsigned int i;
22447
22448 for (i = 0; i < 32; i += 2)
22449 if (((a = rotate_left (val, i)) & 0xff) != 0)
22450 {
22451 if (a & 0xff00)
22452 {
22453 if (a & ~ 0xffff)
22454 continue;
22455 * highpart = (a >> 8) | ((i + 24) << 7);
22456 }
22457 else if (a & 0xff0000)
22458 {
22459 if (a & 0xff000000)
22460 continue;
22461 * highpart = (a >> 16) | ((i + 16) << 7);
22462 }
22463 else
22464 {
22465 gas_assert (a & 0xff000000);
22466 * highpart = (a >> 24) | ((i + 8) << 7);
22467 }
22468
22469 return (a & 0xff) | (i << 7);
22470 }
22471
22472 return FAIL;
22473 }
22474
22475 static int
22476 validate_offset_imm (unsigned int val, int hwse)
22477 {
22478 if ((hwse && val > 255) || val > 4095)
22479 return FAIL;
22480 return val;
22481 }
22482
22483 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22484 negative immediate constant by altering the instruction. A bit of
22485 a hack really.
22486 MOV <-> MVN
22487 AND <-> BIC
22488 ADC <-> SBC
22489 by inverting the second operand, and
22490 ADD <-> SUB
22491 CMP <-> CMN
22492 by negating the second operand. */
22493
22494 static int
22495 negate_data_op (unsigned long * instruction,
22496 unsigned long value)
22497 {
22498 int op, new_inst;
22499 unsigned long negated, inverted;
22500
22501 negated = encode_arm_immediate (-value);
22502 inverted = encode_arm_immediate (~value);
22503
22504 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22505 switch (op)
22506 {
22507 /* First negates. */
22508 case OPCODE_SUB: /* ADD <-> SUB */
22509 new_inst = OPCODE_ADD;
22510 value = negated;
22511 break;
22512
22513 case OPCODE_ADD:
22514 new_inst = OPCODE_SUB;
22515 value = negated;
22516 break;
22517
22518 case OPCODE_CMP: /* CMP <-> CMN */
22519 new_inst = OPCODE_CMN;
22520 value = negated;
22521 break;
22522
22523 case OPCODE_CMN:
22524 new_inst = OPCODE_CMP;
22525 value = negated;
22526 break;
22527
22528 /* Now Inverted ops. */
22529 case OPCODE_MOV: /* MOV <-> MVN */
22530 new_inst = OPCODE_MVN;
22531 value = inverted;
22532 break;
22533
22534 case OPCODE_MVN:
22535 new_inst = OPCODE_MOV;
22536 value = inverted;
22537 break;
22538
22539 case OPCODE_AND: /* AND <-> BIC */
22540 new_inst = OPCODE_BIC;
22541 value = inverted;
22542 break;
22543
22544 case OPCODE_BIC:
22545 new_inst = OPCODE_AND;
22546 value = inverted;
22547 break;
22548
22549 case OPCODE_ADC: /* ADC <-> SBC */
22550 new_inst = OPCODE_SBC;
22551 value = inverted;
22552 break;
22553
22554 case OPCODE_SBC:
22555 new_inst = OPCODE_ADC;
22556 value = inverted;
22557 break;
22558
22559 /* We cannot do anything. */
22560 default:
22561 return FAIL;
22562 }
22563
22564 if (value == (unsigned) FAIL)
22565 return FAIL;
22566
22567 *instruction &= OPCODE_MASK;
22568 *instruction |= new_inst << DATA_OP_SHIFT;
22569 return value;
22570 }
22571
22572 /* Like negate_data_op, but for Thumb-2. */
22573
22574 static unsigned int
22575 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22576 {
22577 int op, new_inst;
22578 int rd;
22579 unsigned int negated, inverted;
22580
22581 negated = encode_thumb32_immediate (-value);
22582 inverted = encode_thumb32_immediate (~value);
22583
22584 rd = (*instruction >> 8) & 0xf;
22585 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22586 switch (op)
22587 {
22588 /* ADD <-> SUB. Includes CMP <-> CMN. */
22589 case T2_OPCODE_SUB:
22590 new_inst = T2_OPCODE_ADD;
22591 value = negated;
22592 break;
22593
22594 case T2_OPCODE_ADD:
22595 new_inst = T2_OPCODE_SUB;
22596 value = negated;
22597 break;
22598
22599 /* ORR <-> ORN. Includes MOV <-> MVN. */
22600 case T2_OPCODE_ORR:
22601 new_inst = T2_OPCODE_ORN;
22602 value = inverted;
22603 break;
22604
22605 case T2_OPCODE_ORN:
22606 new_inst = T2_OPCODE_ORR;
22607 value = inverted;
22608 break;
22609
22610 /* AND <-> BIC. TST has no inverted equivalent. */
22611 case T2_OPCODE_AND:
22612 new_inst = T2_OPCODE_BIC;
22613 if (rd == 15)
22614 value = FAIL;
22615 else
22616 value = inverted;
22617 break;
22618
22619 case T2_OPCODE_BIC:
22620 new_inst = T2_OPCODE_AND;
22621 value = inverted;
22622 break;
22623
22624 /* ADC <-> SBC */
22625 case T2_OPCODE_ADC:
22626 new_inst = T2_OPCODE_SBC;
22627 value = inverted;
22628 break;
22629
22630 case T2_OPCODE_SBC:
22631 new_inst = T2_OPCODE_ADC;
22632 value = inverted;
22633 break;
22634
22635 /* We cannot do anything. */
22636 default:
22637 return FAIL;
22638 }
22639
22640 if (value == (unsigned int)FAIL)
22641 return FAIL;
22642
22643 *instruction &= T2_OPCODE_MASK;
22644 *instruction |= new_inst << T2_DATA_OP_SHIFT;
22645 return value;
22646 }
22647
22648 /* Read a 32-bit thumb instruction from buf. */
22649 static unsigned long
22650 get_thumb32_insn (char * buf)
22651 {
22652 unsigned long insn;
22653 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22654 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22655
22656 return insn;
22657 }
22658
22659
22660 /* We usually want to set the low bit on the address of thumb function
22661 symbols. In particular .word foo - . should have the low bit set.
22662 Generic code tries to fold the difference of two symbols to
22663 a constant. Prevent this and force a relocation when the first symbols
22664 is a thumb function. */
22665
22666 bfd_boolean
22667 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22668 {
22669 if (op == O_subtract
22670 && l->X_op == O_symbol
22671 && r->X_op == O_symbol
22672 && THUMB_IS_FUNC (l->X_add_symbol))
22673 {
22674 l->X_op = O_subtract;
22675 l->X_op_symbol = r->X_add_symbol;
22676 l->X_add_number -= r->X_add_number;
22677 return TRUE;
22678 }
22679
22680 /* Process as normal. */
22681 return FALSE;
22682 }
22683
/* Encode the immediate field of a Thumb-2 unconditional branch (B.W) or
   call (BL/BLX).  Both use the same immediate layout, so one routine
   serves both.  BUF points at the first halfword of the instruction;
   VALUE is the byte offset to encode.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the offset into the encoding's fields: S is the sign bit,
     I1/I2 feed the J1/J2 bits, HI is the 10-bit imm10 field and LO the
     11-bit imm11 field.  Bit 0 of the offset is not stored.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  /* Clear the J1 (bit 13) and J2 (bit 11) positions before inserting.  */
  newval2 &= ~T2I1I2MASK;
  /* J1/J2 are I1/I2 XORed with NOT S; the final XOR with T2I1I2MASK
     supplies the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22708
22709 void
22710 md_apply_fix (fixS * fixP,
22711 valueT * valP,
22712 segT seg)
22713 {
22714 offsetT value = * valP;
22715 offsetT newval;
22716 unsigned int newimm;
22717 unsigned long temp;
22718 int sign;
22719 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22720
22721 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22722
22723 /* Note whether this will delete the relocation. */
22724
22725 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22726 fixP->fx_done = 1;
22727
22728 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22729 consistency with the behaviour on 32-bit hosts. Remember value
22730 for emit_reloc. */
22731 value &= 0xffffffff;
22732 value ^= 0x80000000;
22733 value -= 0x80000000;
22734
22735 *valP = value;
22736 fixP->fx_addnumber = value;
22737
22738 /* Same treatment for fixP->fx_offset. */
22739 fixP->fx_offset &= 0xffffffff;
22740 fixP->fx_offset ^= 0x80000000;
22741 fixP->fx_offset -= 0x80000000;
22742
22743 switch (fixP->fx_r_type)
22744 {
22745 case BFD_RELOC_NONE:
22746 /* This will need to go in the object file. */
22747 fixP->fx_done = 0;
22748 break;
22749
22750 case BFD_RELOC_ARM_IMMEDIATE:
22751 /* We claim that this fixup has been processed here,
22752 even if in fact we generate an error because we do
22753 not have a reloc for it, so tc_gen_reloc will reject it. */
22754 fixP->fx_done = 1;
22755
22756 if (fixP->fx_addsy)
22757 {
22758 const char *msg = 0;
22759
22760 if (! S_IS_DEFINED (fixP->fx_addsy))
22761 msg = _("undefined symbol %s used as an immediate value");
22762 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22763 msg = _("symbol %s is in a different section");
22764 else if (S_IS_WEAK (fixP->fx_addsy))
22765 msg = _("symbol %s is weak and may be overridden later");
22766
22767 if (msg)
22768 {
22769 as_bad_where (fixP->fx_file, fixP->fx_line,
22770 msg, S_GET_NAME (fixP->fx_addsy));
22771 break;
22772 }
22773 }
22774
22775 temp = md_chars_to_number (buf, INSN_SIZE);
22776
22777 /* If the offset is negative, we should use encoding A2 for ADR. */
22778 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22779 newimm = negate_data_op (&temp, value);
22780 else
22781 {
22782 newimm = encode_arm_immediate (value);
22783
22784 /* If the instruction will fail, see if we can fix things up by
22785 changing the opcode. */
22786 if (newimm == (unsigned int) FAIL)
22787 newimm = negate_data_op (&temp, value);
22788 /* MOV accepts both ARM modified immediate (A1 encoding) and
22789 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22790 When disassembling, MOV is preferred when there is no encoding
22791 overlap. */
22792 if (newimm == (unsigned int) FAIL
22793 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
22794 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
22795 && !((temp >> SBIT_SHIFT) & 0x1)
22796 && value >= 0 && value <= 0xffff)
22797 {
22798 /* Clear bits[23:20] to change encoding from A1 to A2. */
22799 temp &= 0xff0fffff;
22800 /* Encoding high 4bits imm. Code below will encode the remaining
22801 low 12bits. */
22802 temp |= (value & 0x0000f000) << 4;
22803 newimm = value & 0x00000fff;
22804 }
22805 }
22806
22807 if (newimm == (unsigned int) FAIL)
22808 {
22809 as_bad_where (fixP->fx_file, fixP->fx_line,
22810 _("invalid constant (%lx) after fixup"),
22811 (unsigned long) value);
22812 break;
22813 }
22814
22815 newimm |= (temp & 0xfffff000);
22816 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22817 break;
22818
22819 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22820 {
22821 unsigned int highpart = 0;
22822 unsigned int newinsn = 0xe1a00000; /* nop. */
22823
22824 if (fixP->fx_addsy)
22825 {
22826 const char *msg = 0;
22827
22828 if (! S_IS_DEFINED (fixP->fx_addsy))
22829 msg = _("undefined symbol %s used as an immediate value");
22830 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22831 msg = _("symbol %s is in a different section");
22832 else if (S_IS_WEAK (fixP->fx_addsy))
22833 msg = _("symbol %s is weak and may be overridden later");
22834
22835 if (msg)
22836 {
22837 as_bad_where (fixP->fx_file, fixP->fx_line,
22838 msg, S_GET_NAME (fixP->fx_addsy));
22839 break;
22840 }
22841 }
22842
22843 newimm = encode_arm_immediate (value);
22844 temp = md_chars_to_number (buf, INSN_SIZE);
22845
22846 /* If the instruction will fail, see if we can fix things up by
22847 changing the opcode. */
22848 if (newimm == (unsigned int) FAIL
22849 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22850 {
22851 /* No ? OK - try using two ADD instructions to generate
22852 the value. */
22853 newimm = validate_immediate_twopart (value, & highpart);
22854
22855 /* Yes - then make sure that the second instruction is
22856 also an add. */
22857 if (newimm != (unsigned int) FAIL)
22858 newinsn = temp;
22859 /* Still No ? Try using a negated value. */
22860 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22861 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22862 /* Otherwise - give up. */
22863 else
22864 {
22865 as_bad_where (fixP->fx_file, fixP->fx_line,
22866 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22867 (long) value);
22868 break;
22869 }
22870
22871 /* Replace the first operand in the 2nd instruction (which
22872 is the PC) with the destination register. We have
22873 already added in the PC in the first instruction and we
22874 do not want to do it again. */
22875 newinsn &= ~ 0xf0000;
22876 newinsn |= ((newinsn & 0x0f000) << 4);
22877 }
22878
22879 newimm |= (temp & 0xfffff000);
22880 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22881
22882 highpart |= (newinsn & 0xfffff000);
22883 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22884 }
22885 break;
22886
22887 case BFD_RELOC_ARM_OFFSET_IMM:
22888 if (!fixP->fx_done && seg->use_rela_p)
22889 value = 0;
22890 /* Fall through. */
22891
22892 case BFD_RELOC_ARM_LITERAL:
22893 sign = value > 0;
22894
22895 if (value < 0)
22896 value = - value;
22897
22898 if (validate_offset_imm (value, 0) == FAIL)
22899 {
22900 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22901 as_bad_where (fixP->fx_file, fixP->fx_line,
22902 _("invalid literal constant: pool needs to be closer"));
22903 else
22904 as_bad_where (fixP->fx_file, fixP->fx_line,
22905 _("bad immediate value for offset (%ld)"),
22906 (long) value);
22907 break;
22908 }
22909
22910 newval = md_chars_to_number (buf, INSN_SIZE);
22911 if (value == 0)
22912 newval &= 0xfffff000;
22913 else
22914 {
22915 newval &= 0xff7ff000;
22916 newval |= value | (sign ? INDEX_UP : 0);
22917 }
22918 md_number_to_chars (buf, newval, INSN_SIZE);
22919 break;
22920
22921 case BFD_RELOC_ARM_OFFSET_IMM8:
22922 case BFD_RELOC_ARM_HWLITERAL:
22923 sign = value > 0;
22924
22925 if (value < 0)
22926 value = - value;
22927
22928 if (validate_offset_imm (value, 1) == FAIL)
22929 {
22930 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22931 as_bad_where (fixP->fx_file, fixP->fx_line,
22932 _("invalid literal constant: pool needs to be closer"));
22933 else
22934 as_bad_where (fixP->fx_file, fixP->fx_line,
22935 _("bad immediate value for 8-bit offset (%ld)"),
22936 (long) value);
22937 break;
22938 }
22939
22940 newval = md_chars_to_number (buf, INSN_SIZE);
22941 if (value == 0)
22942 newval &= 0xfffff0f0;
22943 else
22944 {
22945 newval &= 0xff7ff0f0;
22946 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22947 }
22948 md_number_to_chars (buf, newval, INSN_SIZE);
22949 break;
22950
22951 case BFD_RELOC_ARM_T32_OFFSET_U8:
22952 if (value < 0 || value > 1020 || value % 4 != 0)
22953 as_bad_where (fixP->fx_file, fixP->fx_line,
22954 _("bad immediate value for offset (%ld)"), (long) value);
22955 value /= 4;
22956
22957 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22958 newval |= value;
22959 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22960 break;
22961
22962 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22963 /* This is a complicated relocation used for all varieties of Thumb32
22964 load/store instruction with immediate offset:
22965
22966 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22967 *4, optional writeback(W)
22968 (doubleword load/store)
22969
22970 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22971 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22972 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22973 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22974 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22975
22976 Uppercase letters indicate bits that are already encoded at
22977 this point. Lowercase letters are our problem. For the
22978 second block of instructions, the secondary opcode nybble
22979 (bits 8..11) is present, and bit 23 is zero, even if this is
22980 a PC-relative operation. */
22981 newval = md_chars_to_number (buf, THUMB_SIZE);
22982 newval <<= 16;
22983 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22984
22985 if ((newval & 0xf0000000) == 0xe0000000)
22986 {
22987 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22988 if (value >= 0)
22989 newval |= (1 << 23);
22990 else
22991 value = -value;
22992 if (value % 4 != 0)
22993 {
22994 as_bad_where (fixP->fx_file, fixP->fx_line,
22995 _("offset not a multiple of 4"));
22996 break;
22997 }
22998 value /= 4;
22999 if (value > 0xff)
23000 {
23001 as_bad_where (fixP->fx_file, fixP->fx_line,
23002 _("offset out of range"));
23003 break;
23004 }
23005 newval &= ~0xff;
23006 }
23007 else if ((newval & 0x000f0000) == 0x000f0000)
23008 {
23009 /* PC-relative, 12-bit offset. */
23010 if (value >= 0)
23011 newval |= (1 << 23);
23012 else
23013 value = -value;
23014 if (value > 0xfff)
23015 {
23016 as_bad_where (fixP->fx_file, fixP->fx_line,
23017 _("offset out of range"));
23018 break;
23019 }
23020 newval &= ~0xfff;
23021 }
23022 else if ((newval & 0x00000100) == 0x00000100)
23023 {
23024 /* Writeback: 8-bit, +/- offset. */
23025 if (value >= 0)
23026 newval |= (1 << 9);
23027 else
23028 value = -value;
23029 if (value > 0xff)
23030 {
23031 as_bad_where (fixP->fx_file, fixP->fx_line,
23032 _("offset out of range"));
23033 break;
23034 }
23035 newval &= ~0xff;
23036 }
23037 else if ((newval & 0x00000f00) == 0x00000e00)
23038 {
23039 /* T-instruction: positive 8-bit offset. */
23040 if (value < 0 || value > 0xff)
23041 {
23042 as_bad_where (fixP->fx_file, fixP->fx_line,
23043 _("offset out of range"));
23044 break;
23045 }
23046 newval &= ~0xff;
23047 newval |= value;
23048 }
23049 else
23050 {
23051 /* Positive 12-bit or negative 8-bit offset. */
23052 int limit;
23053 if (value >= 0)
23054 {
23055 newval |= (1 << 23);
23056 limit = 0xfff;
23057 }
23058 else
23059 {
23060 value = -value;
23061 limit = 0xff;
23062 }
23063 if (value > limit)
23064 {
23065 as_bad_where (fixP->fx_file, fixP->fx_line,
23066 _("offset out of range"));
23067 break;
23068 }
23069 newval &= ~limit;
23070 }
23071
23072 newval |= value;
23073 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23074 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23075 break;
23076
23077 case BFD_RELOC_ARM_SHIFT_IMM:
23078 newval = md_chars_to_number (buf, INSN_SIZE);
23079 if (((unsigned long) value) > 32
23080 || (value == 32
23081 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23082 {
23083 as_bad_where (fixP->fx_file, fixP->fx_line,
23084 _("shift expression is too large"));
23085 break;
23086 }
23087
23088 if (value == 0)
23089 /* Shifts of zero must be done as lsl. */
23090 newval &= ~0x60;
23091 else if (value == 32)
23092 value = 0;
23093 newval &= 0xfffff07f;
23094 newval |= (value & 0x1f) << 7;
23095 md_number_to_chars (buf, newval, INSN_SIZE);
23096 break;
23097
23098 case BFD_RELOC_ARM_T32_IMMEDIATE:
23099 case BFD_RELOC_ARM_T32_ADD_IMM:
23100 case BFD_RELOC_ARM_T32_IMM12:
23101 case BFD_RELOC_ARM_T32_ADD_PC12:
23102 /* We claim that this fixup has been processed here,
23103 even if in fact we generate an error because we do
23104 not have a reloc for it, so tc_gen_reloc will reject it. */
23105 fixP->fx_done = 1;
23106
23107 if (fixP->fx_addsy
23108 && ! S_IS_DEFINED (fixP->fx_addsy))
23109 {
23110 as_bad_where (fixP->fx_file, fixP->fx_line,
23111 _("undefined symbol %s used as an immediate value"),
23112 S_GET_NAME (fixP->fx_addsy));
23113 break;
23114 }
23115
23116 newval = md_chars_to_number (buf, THUMB_SIZE);
23117 newval <<= 16;
23118 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23119
23120 newimm = FAIL;
23121 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23122 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23123 Thumb2 modified immediate encoding (T2). */
23124 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23125 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23126 {
23127 newimm = encode_thumb32_immediate (value);
23128 if (newimm == (unsigned int) FAIL)
23129 newimm = thumb32_negate_data_op (&newval, value);
23130 }
23131 if (newimm == (unsigned int) FAIL)
23132 {
23133 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23134 {
23135 /* Turn add/sum into addw/subw. */
23136 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23137 newval = (newval & 0xfeffffff) | 0x02000000;
23138 /* No flat 12-bit imm encoding for addsw/subsw. */
23139 if ((newval & 0x00100000) == 0)
23140 {
23141 /* 12 bit immediate for addw/subw. */
23142 if (value < 0)
23143 {
23144 value = -value;
23145 newval ^= 0x00a00000;
23146 }
23147 if (value > 0xfff)
23148 newimm = (unsigned int) FAIL;
23149 else
23150 newimm = value;
23151 }
23152 }
23153 else
23154 {
23155 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23156 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23157 disassembling, MOV is preferred when there is no encoding
23158 overlap.
23159 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23160 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23161 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23162 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23163 && value >= 0 && value <=0xffff)
23164 {
23165 /* Toggle bit[25] to change encoding from T2 to T3. */
23166 newval ^= 1 << 25;
23167 /* Clear bits[19:16]. */
23168 newval &= 0xfff0ffff;
23169 /* Encoding high 4bits imm. Code below will encode the
23170 remaining low 12bits. */
23171 newval |= (value & 0x0000f000) << 4;
23172 newimm = value & 0x00000fff;
23173 }
23174 }
23175 }
23176
23177 if (newimm == (unsigned int)FAIL)
23178 {
23179 as_bad_where (fixP->fx_file, fixP->fx_line,
23180 _("invalid constant (%lx) after fixup"),
23181 (unsigned long) value);
23182 break;
23183 }
23184
23185 newval |= (newimm & 0x800) << 15;
23186 newval |= (newimm & 0x700) << 4;
23187 newval |= (newimm & 0x0ff);
23188
23189 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23190 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23191 break;
23192
23193 case BFD_RELOC_ARM_SMC:
23194 if (((unsigned long) value) > 0xffff)
23195 as_bad_where (fixP->fx_file, fixP->fx_line,
23196 _("invalid smc expression"));
23197 newval = md_chars_to_number (buf, INSN_SIZE);
23198 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23199 md_number_to_chars (buf, newval, INSN_SIZE);
23200 break;
23201
23202 case BFD_RELOC_ARM_HVC:
23203 if (((unsigned long) value) > 0xffff)
23204 as_bad_where (fixP->fx_file, fixP->fx_line,
23205 _("invalid hvc expression"));
23206 newval = md_chars_to_number (buf, INSN_SIZE);
23207 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23208 md_number_to_chars (buf, newval, INSN_SIZE);
23209 break;
23210
23211 case BFD_RELOC_ARM_SWI:
23212 if (fixP->tc_fix_data != 0)
23213 {
23214 if (((unsigned long) value) > 0xff)
23215 as_bad_where (fixP->fx_file, fixP->fx_line,
23216 _("invalid swi expression"));
23217 newval = md_chars_to_number (buf, THUMB_SIZE);
23218 newval |= value;
23219 md_number_to_chars (buf, newval, THUMB_SIZE);
23220 }
23221 else
23222 {
23223 if (((unsigned long) value) > 0x00ffffff)
23224 as_bad_where (fixP->fx_file, fixP->fx_line,
23225 _("invalid swi expression"));
23226 newval = md_chars_to_number (buf, INSN_SIZE);
23227 newval |= value;
23228 md_number_to_chars (buf, newval, INSN_SIZE);
23229 }
23230 break;
23231
23232 case BFD_RELOC_ARM_MULTI:
23233 if (((unsigned long) value) > 0xffff)
23234 as_bad_where (fixP->fx_file, fixP->fx_line,
23235 _("invalid expression in load/store multiple"));
23236 newval = value | md_chars_to_number (buf, INSN_SIZE);
23237 md_number_to_chars (buf, newval, INSN_SIZE);
23238 break;
23239
23240 #ifdef OBJ_ELF
23241 case BFD_RELOC_ARM_PCREL_CALL:
23242
23243 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23244 && fixP->fx_addsy
23245 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23246 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23247 && THUMB_IS_FUNC (fixP->fx_addsy))
23248 /* Flip the bl to blx. This is a simple flip
23249 bit here because we generate PCREL_CALL for
23250 unconditional bls. */
23251 {
23252 newval = md_chars_to_number (buf, INSN_SIZE);
23253 newval = newval | 0x10000000;
23254 md_number_to_chars (buf, newval, INSN_SIZE);
23255 temp = 1;
23256 fixP->fx_done = 1;
23257 }
23258 else
23259 temp = 3;
23260 goto arm_branch_common;
23261
23262 case BFD_RELOC_ARM_PCREL_JUMP:
23263 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23264 && fixP->fx_addsy
23265 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23266 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23267 && THUMB_IS_FUNC (fixP->fx_addsy))
23268 {
23269 /* This would map to a bl<cond>, b<cond>,
23270 b<always> to a Thumb function. We
23271 need to force a relocation for this particular
23272 case. */
23273 newval = md_chars_to_number (buf, INSN_SIZE);
23274 fixP->fx_done = 0;
23275 }
23276 /* Fall through. */
23277
23278 case BFD_RELOC_ARM_PLT32:
23279 #endif
23280 case BFD_RELOC_ARM_PCREL_BRANCH:
23281 temp = 3;
23282 goto arm_branch_common;
23283
23284 case BFD_RELOC_ARM_PCREL_BLX:
23285
23286 temp = 1;
23287 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23288 && fixP->fx_addsy
23289 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23290 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23291 && ARM_IS_FUNC (fixP->fx_addsy))
23292 {
23293 /* Flip the blx to a bl and warn. */
23294 const char *name = S_GET_NAME (fixP->fx_addsy);
23295 newval = 0xeb000000;
23296 as_warn_where (fixP->fx_file, fixP->fx_line,
23297 _("blx to '%s' an ARM ISA state function changed to bl"),
23298 name);
23299 md_number_to_chars (buf, newval, INSN_SIZE);
23300 temp = 3;
23301 fixP->fx_done = 1;
23302 }
23303
23304 #ifdef OBJ_ELF
23305 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23306 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23307 #endif
23308
23309 arm_branch_common:
23310 /* We are going to store value (shifted right by two) in the
23311 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23312 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23313 also be be clear. */
23314 if (value & temp)
23315 as_bad_where (fixP->fx_file, fixP->fx_line,
23316 _("misaligned branch destination"));
23317 if ((value & (offsetT)0xfe000000) != (offsetT)0
23318 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23319 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23320
23321 if (fixP->fx_done || !seg->use_rela_p)
23322 {
23323 newval = md_chars_to_number (buf, INSN_SIZE);
23324 newval |= (value >> 2) & 0x00ffffff;
23325 /* Set the H bit on BLX instructions. */
23326 if (temp == 1)
23327 {
23328 if (value & 2)
23329 newval |= 0x01000000;
23330 else
23331 newval &= ~0x01000000;
23332 }
23333 md_number_to_chars (buf, newval, INSN_SIZE);
23334 }
23335 break;
23336
23337 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23338 /* CBZ can only branch forward. */
23339
23340 /* Attempts to use CBZ to branch to the next instruction
23341 (which, strictly speaking, are prohibited) will be turned into
23342 no-ops.
23343
23344 FIXME: It may be better to remove the instruction completely and
23345 perform relaxation. */
23346 if (value == -2)
23347 {
23348 newval = md_chars_to_number (buf, THUMB_SIZE);
23349 newval = 0xbf00; /* NOP encoding T1 */
23350 md_number_to_chars (buf, newval, THUMB_SIZE);
23351 }
23352 else
23353 {
23354 if (value & ~0x7e)
23355 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23356
23357 if (fixP->fx_done || !seg->use_rela_p)
23358 {
23359 newval = md_chars_to_number (buf, THUMB_SIZE);
23360 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23361 md_number_to_chars (buf, newval, THUMB_SIZE);
23362 }
23363 }
23364 break;
23365
23366 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23367 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23368 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23369
23370 if (fixP->fx_done || !seg->use_rela_p)
23371 {
23372 newval = md_chars_to_number (buf, THUMB_SIZE);
23373 newval |= (value & 0x1ff) >> 1;
23374 md_number_to_chars (buf, newval, THUMB_SIZE);
23375 }
23376 break;
23377
23378 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23379 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23380 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23381
23382 if (fixP->fx_done || !seg->use_rela_p)
23383 {
23384 newval = md_chars_to_number (buf, THUMB_SIZE);
23385 newval |= (value & 0xfff) >> 1;
23386 md_number_to_chars (buf, newval, THUMB_SIZE);
23387 }
23388 break;
23389
23390 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23391 if (fixP->fx_addsy
23392 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23393 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23394 && ARM_IS_FUNC (fixP->fx_addsy)
23395 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23396 {
23397 /* Force a relocation for a branch 20 bits wide. */
23398 fixP->fx_done = 0;
23399 }
23400 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23401 as_bad_where (fixP->fx_file, fixP->fx_line,
23402 _("conditional branch out of range"));
23403
23404 if (fixP->fx_done || !seg->use_rela_p)
23405 {
23406 offsetT newval2;
23407 addressT S, J1, J2, lo, hi;
23408
23409 S = (value & 0x00100000) >> 20;
23410 J2 = (value & 0x00080000) >> 19;
23411 J1 = (value & 0x00040000) >> 18;
23412 hi = (value & 0x0003f000) >> 12;
23413 lo = (value & 0x00000ffe) >> 1;
23414
23415 newval = md_chars_to_number (buf, THUMB_SIZE);
23416 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23417 newval |= (S << 10) | hi;
23418 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23419 md_number_to_chars (buf, newval, THUMB_SIZE);
23420 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23421 }
23422 break;
23423
23424 case BFD_RELOC_THUMB_PCREL_BLX:
23425 /* If there is a blx from a thumb state function to
23426 another thumb function flip this to a bl and warn
23427 about it. */
23428
23429 if (fixP->fx_addsy
23430 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23431 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23432 && THUMB_IS_FUNC (fixP->fx_addsy))
23433 {
23434 const char *name = S_GET_NAME (fixP->fx_addsy);
23435 as_warn_where (fixP->fx_file, fixP->fx_line,
23436 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23437 name);
23438 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23439 newval = newval | 0x1000;
23440 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23441 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23442 fixP->fx_done = 1;
23443 }
23444
23445
23446 goto thumb_bl_common;
23447
23448 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23449 /* A bl from Thumb state ISA to an internal ARM state function
23450 is converted to a blx. */
23451 if (fixP->fx_addsy
23452 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23453 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23454 && ARM_IS_FUNC (fixP->fx_addsy)
23455 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23456 {
23457 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23458 newval = newval & ~0x1000;
23459 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23460 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23461 fixP->fx_done = 1;
23462 }
23463
23464 thumb_bl_common:
23465
23466 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23467 /* For a BLX instruction, make sure that the relocation is rounded up
23468 to a word boundary. This follows the semantics of the instruction
23469 which specifies that bit 1 of the target address will come from bit
23470 1 of the base address. */
23471 value = (value + 3) & ~ 3;
23472
23473 #ifdef OBJ_ELF
23474 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23475 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23476 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23477 #endif
23478
23479 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23480 {
23481 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23482 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23483 else if ((value & ~0x1ffffff)
23484 && ((value & ~0x1ffffff) != ~0x1ffffff))
23485 as_bad_where (fixP->fx_file, fixP->fx_line,
23486 _("Thumb2 branch out of range"));
23487 }
23488
23489 if (fixP->fx_done || !seg->use_rela_p)
23490 encode_thumb2_b_bl_offset (buf, value);
23491
23492 break;
23493
23494 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23495 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23496 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23497
23498 if (fixP->fx_done || !seg->use_rela_p)
23499 encode_thumb2_b_bl_offset (buf, value);
23500
23501 break;
23502
23503 case BFD_RELOC_8:
23504 if (fixP->fx_done || !seg->use_rela_p)
23505 *buf = value;
23506 break;
23507
23508 case BFD_RELOC_16:
23509 if (fixP->fx_done || !seg->use_rela_p)
23510 md_number_to_chars (buf, value, 2);
23511 break;
23512
23513 #ifdef OBJ_ELF
23514 case BFD_RELOC_ARM_TLS_CALL:
23515 case BFD_RELOC_ARM_THM_TLS_CALL:
23516 case BFD_RELOC_ARM_TLS_DESCSEQ:
23517 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23518 case BFD_RELOC_ARM_TLS_GOTDESC:
23519 case BFD_RELOC_ARM_TLS_GD32:
23520 case BFD_RELOC_ARM_TLS_LE32:
23521 case BFD_RELOC_ARM_TLS_IE32:
23522 case BFD_RELOC_ARM_TLS_LDM32:
23523 case BFD_RELOC_ARM_TLS_LDO32:
23524 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23525 break;
23526
23527 case BFD_RELOC_ARM_GOT32:
23528 case BFD_RELOC_ARM_GOTOFF:
23529 break;
23530
23531 case BFD_RELOC_ARM_GOT_PREL:
23532 if (fixP->fx_done || !seg->use_rela_p)
23533 md_number_to_chars (buf, value, 4);
23534 break;
23535
23536 case BFD_RELOC_ARM_TARGET2:
23537 /* TARGET2 is not partial-inplace, so we need to write the
23538 addend here for REL targets, because it won't be written out
23539 during reloc processing later. */
23540 if (fixP->fx_done || !seg->use_rela_p)
23541 md_number_to_chars (buf, fixP->fx_offset, 4);
23542 break;
23543 #endif
23544
23545 case BFD_RELOC_RVA:
23546 case BFD_RELOC_32:
23547 case BFD_RELOC_ARM_TARGET1:
23548 case BFD_RELOC_ARM_ROSEGREL32:
23549 case BFD_RELOC_ARM_SBREL32:
23550 case BFD_RELOC_32_PCREL:
23551 #ifdef TE_PE
23552 case BFD_RELOC_32_SECREL:
23553 #endif
23554 if (fixP->fx_done || !seg->use_rela_p)
23555 #ifdef TE_WINCE
23556 /* For WinCE we only do this for pcrel fixups. */
23557 if (fixP->fx_done || fixP->fx_pcrel)
23558 #endif
23559 md_number_to_chars (buf, value, 4);
23560 break;
23561
23562 #ifdef OBJ_ELF
23563 case BFD_RELOC_ARM_PREL31:
23564 if (fixP->fx_done || !seg->use_rela_p)
23565 {
23566 newval = md_chars_to_number (buf, 4) & 0x80000000;
23567 if ((value ^ (value >> 1)) & 0x40000000)
23568 {
23569 as_bad_where (fixP->fx_file, fixP->fx_line,
23570 _("rel31 relocation overflow"));
23571 }
23572 newval |= value & 0x7fffffff;
23573 md_number_to_chars (buf, newval, 4);
23574 }
23575 break;
23576 #endif
23577
23578 case BFD_RELOC_ARM_CP_OFF_IMM:
23579 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23580 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23581 newval = md_chars_to_number (buf, INSN_SIZE);
23582 else
23583 newval = get_thumb32_insn (buf);
23584 if ((newval & 0x0f200f00) == 0x0d000900)
23585 {
23586 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23587 has permitted values that are multiples of 2, in the range 0
23588 to 510. */
23589 if (value < -510 || value > 510 || (value & 1))
23590 as_bad_where (fixP->fx_file, fixP->fx_line,
23591 _("co-processor offset out of range"));
23592 }
23593 else if (value < -1023 || value > 1023 || (value & 3))
23594 as_bad_where (fixP->fx_file, fixP->fx_line,
23595 _("co-processor offset out of range"));
23596 cp_off_common:
23597 sign = value > 0;
23598 if (value < 0)
23599 value = -value;
23600 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23601 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23602 newval = md_chars_to_number (buf, INSN_SIZE);
23603 else
23604 newval = get_thumb32_insn (buf);
23605 if (value == 0)
23606 newval &= 0xffffff00;
23607 else
23608 {
23609 newval &= 0xff7fff00;
23610 if ((newval & 0x0f200f00) == 0x0d000900)
23611 {
23612 /* This is a fp16 vstr/vldr.
23613
23614 It requires the immediate offset in the instruction is shifted
23615 left by 1 to be a half-word offset.
23616
23617 Here, left shift by 1 first, and later right shift by 2
23618 should get the right offset. */
23619 value <<= 1;
23620 }
23621 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23622 }
23623 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23624 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23625 md_number_to_chars (buf, newval, INSN_SIZE);
23626 else
23627 put_thumb32_insn (buf, newval);
23628 break;
23629
23630 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23631 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23632 if (value < -255 || value > 255)
23633 as_bad_where (fixP->fx_file, fixP->fx_line,
23634 _("co-processor offset out of range"));
23635 value *= 4;
23636 goto cp_off_common;
23637
23638 case BFD_RELOC_ARM_THUMB_OFFSET:
23639 newval = md_chars_to_number (buf, THUMB_SIZE);
23640 /* Exactly what ranges, and where the offset is inserted depends
23641 on the type of instruction, we can establish this from the
23642 top 4 bits. */
23643 switch (newval >> 12)
23644 {
23645 case 4: /* PC load. */
23646 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23647 forced to zero for these loads; md_pcrel_from has already
23648 compensated for this. */
23649 if (value & 3)
23650 as_bad_where (fixP->fx_file, fixP->fx_line,
23651 _("invalid offset, target not word aligned (0x%08lX)"),
23652 (((unsigned long) fixP->fx_frag->fr_address
23653 + (unsigned long) fixP->fx_where) & ~3)
23654 + (unsigned long) value);
23655
23656 if (value & ~0x3fc)
23657 as_bad_where (fixP->fx_file, fixP->fx_line,
23658 _("invalid offset, value too big (0x%08lX)"),
23659 (long) value);
23660
23661 newval |= value >> 2;
23662 break;
23663
23664 case 9: /* SP load/store. */
23665 if (value & ~0x3fc)
23666 as_bad_where (fixP->fx_file, fixP->fx_line,
23667 _("invalid offset, value too big (0x%08lX)"),
23668 (long) value);
23669 newval |= value >> 2;
23670 break;
23671
23672 case 6: /* Word load/store. */
23673 if (value & ~0x7c)
23674 as_bad_where (fixP->fx_file, fixP->fx_line,
23675 _("invalid offset, value too big (0x%08lX)"),
23676 (long) value);
23677 newval |= value << 4; /* 6 - 2. */
23678 break;
23679
23680 case 7: /* Byte load/store. */
23681 if (value & ~0x1f)
23682 as_bad_where (fixP->fx_file, fixP->fx_line,
23683 _("invalid offset, value too big (0x%08lX)"),
23684 (long) value);
23685 newval |= value << 6;
23686 break;
23687
23688 case 8: /* Halfword load/store. */
23689 if (value & ~0x3e)
23690 as_bad_where (fixP->fx_file, fixP->fx_line,
23691 _("invalid offset, value too big (0x%08lX)"),
23692 (long) value);
23693 newval |= value << 5; /* 6 - 1. */
23694 break;
23695
23696 default:
23697 as_bad_where (fixP->fx_file, fixP->fx_line,
23698 "Unable to process relocation for thumb opcode: %lx",
23699 (unsigned long) newval);
23700 break;
23701 }
23702 md_number_to_chars (buf, newval, THUMB_SIZE);
23703 break;
23704
23705 case BFD_RELOC_ARM_THUMB_ADD:
23706 /* This is a complicated relocation, since we use it for all of
23707 the following immediate relocations:
23708
23709 3bit ADD/SUB
23710 8bit ADD/SUB
23711 9bit ADD/SUB SP word-aligned
23712 10bit ADD PC/SP word-aligned
23713
23714 The type of instruction being processed is encoded in the
23715 instruction field:
23716
23717 0x8000 SUB
23718 0x00F0 Rd
23719 0x000F Rs
23720 */
23721 newval = md_chars_to_number (buf, THUMB_SIZE);
23722 {
23723 int rd = (newval >> 4) & 0xf;
23724 int rs = newval & 0xf;
23725 int subtract = !!(newval & 0x8000);
23726
23727 /* Check for HI regs, only very restricted cases allowed:
23728 Adjusting SP, and using PC or SP to get an address. */
23729 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23730 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23731 as_bad_where (fixP->fx_file, fixP->fx_line,
23732 _("invalid Hi register with immediate"));
23733
23734 /* If value is negative, choose the opposite instruction. */
23735 if (value < 0)
23736 {
23737 value = -value;
23738 subtract = !subtract;
23739 if (value < 0)
23740 as_bad_where (fixP->fx_file, fixP->fx_line,
23741 _("immediate value out of range"));
23742 }
23743
23744 if (rd == REG_SP)
23745 {
23746 if (value & ~0x1fc)
23747 as_bad_where (fixP->fx_file, fixP->fx_line,
23748 _("invalid immediate for stack address calculation"));
23749 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23750 newval |= value >> 2;
23751 }
23752 else if (rs == REG_PC || rs == REG_SP)
23753 {
23754 /* PR gas/18541. If the addition is for a defined symbol
23755 within range of an ADR instruction then accept it. */
23756 if (subtract
23757 && value == 4
23758 && fixP->fx_addsy != NULL)
23759 {
23760 subtract = 0;
23761
23762 if (! S_IS_DEFINED (fixP->fx_addsy)
23763 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23764 || S_IS_WEAK (fixP->fx_addsy))
23765 {
23766 as_bad_where (fixP->fx_file, fixP->fx_line,
23767 _("address calculation needs a strongly defined nearby symbol"));
23768 }
23769 else
23770 {
23771 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23772
23773 /* Round up to the next 4-byte boundary. */
23774 if (v & 3)
23775 v = (v + 3) & ~ 3;
23776 else
23777 v += 4;
23778 v = S_GET_VALUE (fixP->fx_addsy) - v;
23779
23780 if (v & ~0x3fc)
23781 {
23782 as_bad_where (fixP->fx_file, fixP->fx_line,
23783 _("symbol too far away"));
23784 }
23785 else
23786 {
23787 fixP->fx_done = 1;
23788 value = v;
23789 }
23790 }
23791 }
23792
23793 if (subtract || value & ~0x3fc)
23794 as_bad_where (fixP->fx_file, fixP->fx_line,
23795 _("invalid immediate for address calculation (value = 0x%08lX)"),
23796 (unsigned long) (subtract ? - value : value));
23797 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23798 newval |= rd << 8;
23799 newval |= value >> 2;
23800 }
23801 else if (rs == rd)
23802 {
23803 if (value & ~0xff)
23804 as_bad_where (fixP->fx_file, fixP->fx_line,
23805 _("immediate value out of range"));
23806 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23807 newval |= (rd << 8) | value;
23808 }
23809 else
23810 {
23811 if (value & ~0x7)
23812 as_bad_where (fixP->fx_file, fixP->fx_line,
23813 _("immediate value out of range"));
23814 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23815 newval |= rd | (rs << 3) | (value << 6);
23816 }
23817 }
23818 md_number_to_chars (buf, newval, THUMB_SIZE);
23819 break;
23820
23821 case BFD_RELOC_ARM_THUMB_IMM:
23822 newval = md_chars_to_number (buf, THUMB_SIZE);
23823 if (value < 0 || value > 255)
23824 as_bad_where (fixP->fx_file, fixP->fx_line,
23825 _("invalid immediate: %ld is out of range"),
23826 (long) value);
23827 newval |= value;
23828 md_number_to_chars (buf, newval, THUMB_SIZE);
23829 break;
23830
23831 case BFD_RELOC_ARM_THUMB_SHIFT:
23832 /* 5bit shift value (0..32). LSL cannot take 32. */
23833 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23834 temp = newval & 0xf800;
23835 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23836 as_bad_where (fixP->fx_file, fixP->fx_line,
23837 _("invalid shift value: %ld"), (long) value);
23838 /* Shifts of zero must be encoded as LSL. */
23839 if (value == 0)
23840 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23841 /* Shifts of 32 are encoded as zero. */
23842 else if (value == 32)
23843 value = 0;
23844 newval |= value << 6;
23845 md_number_to_chars (buf, newval, THUMB_SIZE);
23846 break;
23847
23848 case BFD_RELOC_VTABLE_INHERIT:
23849 case BFD_RELOC_VTABLE_ENTRY:
23850 fixP->fx_done = 0;
23851 return;
23852
23853 case BFD_RELOC_ARM_MOVW:
23854 case BFD_RELOC_ARM_MOVT:
23855 case BFD_RELOC_ARM_THUMB_MOVW:
23856 case BFD_RELOC_ARM_THUMB_MOVT:
23857 if (fixP->fx_done || !seg->use_rela_p)
23858 {
23859 /* REL format relocations are limited to a 16-bit addend. */
23860 if (!fixP->fx_done)
23861 {
23862 if (value < -0x8000 || value > 0x7fff)
23863 as_bad_where (fixP->fx_file, fixP->fx_line,
23864 _("offset out of range"));
23865 }
23866 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23867 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23868 {
23869 value >>= 16;
23870 }
23871
23872 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23873 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23874 {
23875 newval = get_thumb32_insn (buf);
23876 newval &= 0xfbf08f00;
23877 newval |= (value & 0xf000) << 4;
23878 newval |= (value & 0x0800) << 15;
23879 newval |= (value & 0x0700) << 4;
23880 newval |= (value & 0x00ff);
23881 put_thumb32_insn (buf, newval);
23882 }
23883 else
23884 {
23885 newval = md_chars_to_number (buf, 4);
23886 newval &= 0xfff0f000;
23887 newval |= value & 0x0fff;
23888 newval |= (value & 0xf000) << 4;
23889 md_number_to_chars (buf, newval, 4);
23890 }
23891 }
23892 return;
23893
23894 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23895 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23896 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23897 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23898 gas_assert (!fixP->fx_done);
23899 {
23900 bfd_vma insn;
23901 bfd_boolean is_mov;
23902 bfd_vma encoded_addend = value;
23903
23904 /* Check that addend can be encoded in instruction. */
23905 if (!seg->use_rela_p && (value < 0 || value > 255))
23906 as_bad_where (fixP->fx_file, fixP->fx_line,
23907 _("the offset 0x%08lX is not representable"),
23908 (unsigned long) encoded_addend);
23909
23910 /* Extract the instruction. */
23911 insn = md_chars_to_number (buf, THUMB_SIZE);
23912 is_mov = (insn & 0xf800) == 0x2000;
23913
23914 /* Encode insn. */
23915 if (is_mov)
23916 {
23917 if (!seg->use_rela_p)
23918 insn |= encoded_addend;
23919 }
23920 else
23921 {
23922 int rd, rs;
23923
23924 /* Extract the instruction. */
23925 /* Encoding is the following
23926 0x8000 SUB
23927 0x00F0 Rd
23928 0x000F Rs
23929 */
23930 /* The following conditions must be true :
23931 - ADD
23932 - Rd == Rs
23933 - Rd <= 7
23934 */
23935 rd = (insn >> 4) & 0xf;
23936 rs = insn & 0xf;
23937 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23938 as_bad_where (fixP->fx_file, fixP->fx_line,
23939 _("Unable to process relocation for thumb opcode: %lx"),
23940 (unsigned long) insn);
23941
23942 /* Encode as ADD immediate8 thumb 1 code. */
23943 insn = 0x3000 | (rd << 8);
23944
23945 /* Place the encoded addend into the first 8 bits of the
23946 instruction. */
23947 if (!seg->use_rela_p)
23948 insn |= encoded_addend;
23949 }
23950
23951 /* Update the instruction. */
23952 md_number_to_chars (buf, insn, THUMB_SIZE);
23953 }
23954 break;
23955
23956 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23957 case BFD_RELOC_ARM_ALU_PC_G0:
23958 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23959 case BFD_RELOC_ARM_ALU_PC_G1:
23960 case BFD_RELOC_ARM_ALU_PC_G2:
23961 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23962 case BFD_RELOC_ARM_ALU_SB_G0:
23963 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23964 case BFD_RELOC_ARM_ALU_SB_G1:
23965 case BFD_RELOC_ARM_ALU_SB_G2:
23966 gas_assert (!fixP->fx_done);
23967 if (!seg->use_rela_p)
23968 {
23969 bfd_vma insn;
23970 bfd_vma encoded_addend;
23971 bfd_vma addend_abs = abs (value);
23972
23973 /* Check that the absolute value of the addend can be
23974 expressed as an 8-bit constant plus a rotation. */
23975 encoded_addend = encode_arm_immediate (addend_abs);
23976 if (encoded_addend == (unsigned int) FAIL)
23977 as_bad_where (fixP->fx_file, fixP->fx_line,
23978 _("the offset 0x%08lX is not representable"),
23979 (unsigned long) addend_abs);
23980
23981 /* Extract the instruction. */
23982 insn = md_chars_to_number (buf, INSN_SIZE);
23983
23984 /* If the addend is positive, use an ADD instruction.
23985 Otherwise use a SUB. Take care not to destroy the S bit. */
23986 insn &= 0xff1fffff;
23987 if (value < 0)
23988 insn |= 1 << 22;
23989 else
23990 insn |= 1 << 23;
23991
23992 /* Place the encoded addend into the first 12 bits of the
23993 instruction. */
23994 insn &= 0xfffff000;
23995 insn |= encoded_addend;
23996
23997 /* Update the instruction. */
23998 md_number_to_chars (buf, insn, INSN_SIZE);
23999 }
24000 break;
24001
24002 case BFD_RELOC_ARM_LDR_PC_G0:
24003 case BFD_RELOC_ARM_LDR_PC_G1:
24004 case BFD_RELOC_ARM_LDR_PC_G2:
24005 case BFD_RELOC_ARM_LDR_SB_G0:
24006 case BFD_RELOC_ARM_LDR_SB_G1:
24007 case BFD_RELOC_ARM_LDR_SB_G2:
24008 gas_assert (!fixP->fx_done);
24009 if (!seg->use_rela_p)
24010 {
24011 bfd_vma insn;
24012 bfd_vma addend_abs = abs (value);
24013
24014 /* Check that the absolute value of the addend can be
24015 encoded in 12 bits. */
24016 if (addend_abs >= 0x1000)
24017 as_bad_where (fixP->fx_file, fixP->fx_line,
24018 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24019 (unsigned long) addend_abs);
24020
24021 /* Extract the instruction. */
24022 insn = md_chars_to_number (buf, INSN_SIZE);
24023
24024 /* If the addend is negative, clear bit 23 of the instruction.
24025 Otherwise set it. */
24026 if (value < 0)
24027 insn &= ~(1 << 23);
24028 else
24029 insn |= 1 << 23;
24030
24031 /* Place the absolute value of the addend into the first 12 bits
24032 of the instruction. */
24033 insn &= 0xfffff000;
24034 insn |= addend_abs;
24035
24036 /* Update the instruction. */
24037 md_number_to_chars (buf, insn, INSN_SIZE);
24038 }
24039 break;
24040
24041 case BFD_RELOC_ARM_LDRS_PC_G0:
24042 case BFD_RELOC_ARM_LDRS_PC_G1:
24043 case BFD_RELOC_ARM_LDRS_PC_G2:
24044 case BFD_RELOC_ARM_LDRS_SB_G0:
24045 case BFD_RELOC_ARM_LDRS_SB_G1:
24046 case BFD_RELOC_ARM_LDRS_SB_G2:
24047 gas_assert (!fixP->fx_done);
24048 if (!seg->use_rela_p)
24049 {
24050 bfd_vma insn;
24051 bfd_vma addend_abs = abs (value);
24052
24053 /* Check that the absolute value of the addend can be
24054 encoded in 8 bits. */
24055 if (addend_abs >= 0x100)
24056 as_bad_where (fixP->fx_file, fixP->fx_line,
24057 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24058 (unsigned long) addend_abs);
24059
24060 /* Extract the instruction. */
24061 insn = md_chars_to_number (buf, INSN_SIZE);
24062
24063 /* If the addend is negative, clear bit 23 of the instruction.
24064 Otherwise set it. */
24065 if (value < 0)
24066 insn &= ~(1 << 23);
24067 else
24068 insn |= 1 << 23;
24069
24070 /* Place the first four bits of the absolute value of the addend
24071 into the first 4 bits of the instruction, and the remaining
24072 four into bits 8 .. 11. */
24073 insn &= 0xfffff0f0;
24074 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24075
24076 /* Update the instruction. */
24077 md_number_to_chars (buf, insn, INSN_SIZE);
24078 }
24079 break;
24080
24081 case BFD_RELOC_ARM_LDC_PC_G0:
24082 case BFD_RELOC_ARM_LDC_PC_G1:
24083 case BFD_RELOC_ARM_LDC_PC_G2:
24084 case BFD_RELOC_ARM_LDC_SB_G0:
24085 case BFD_RELOC_ARM_LDC_SB_G1:
24086 case BFD_RELOC_ARM_LDC_SB_G2:
24087 gas_assert (!fixP->fx_done);
24088 if (!seg->use_rela_p)
24089 {
24090 bfd_vma insn;
24091 bfd_vma addend_abs = abs (value);
24092
24093 /* Check that the absolute value of the addend is a multiple of
24094 four and, when divided by four, fits in 8 bits. */
24095 if (addend_abs & 0x3)
24096 as_bad_where (fixP->fx_file, fixP->fx_line,
24097 _("bad offset 0x%08lX (must be word-aligned)"),
24098 (unsigned long) addend_abs);
24099
24100 if ((addend_abs >> 2) > 0xff)
24101 as_bad_where (fixP->fx_file, fixP->fx_line,
24102 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24103 (unsigned long) addend_abs);
24104
24105 /* Extract the instruction. */
24106 insn = md_chars_to_number (buf, INSN_SIZE);
24107
24108 /* If the addend is negative, clear bit 23 of the instruction.
24109 Otherwise set it. */
24110 if (value < 0)
24111 insn &= ~(1 << 23);
24112 else
24113 insn |= 1 << 23;
24114
24115 /* Place the addend (divided by four) into the first eight
24116 bits of the instruction. */
24117 insn &= 0xfffffff0;
24118 insn |= addend_abs >> 2;
24119
24120 /* Update the instruction. */
24121 md_number_to_chars (buf, insn, INSN_SIZE);
24122 }
24123 break;
24124
24125 case BFD_RELOC_ARM_V4BX:
24126 /* This will need to go in the object file. */
24127 fixP->fx_done = 0;
24128 break;
24129
24130 case BFD_RELOC_UNUSED:
24131 default:
24132 as_bad_where (fixP->fx_file, fixP->fx_line,
24133 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24134 }
24135 }
24136
/* Translate internal representation of relocation info to BFD target
   format.

   Builds and returns a freshly-allocated arelent for FIXP in SECTION,
   or returns NULL (after reporting an error with as_bad_where) when the
   fixup cannot be represented in the output format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For a PC-relative fixup the addend to emit depends on whether this
     section uses RELA (explicit addend) or REL (addend stored in the
     section contents) style relocations.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto a BFD relocation code.  The first
     group of cases converts absolute relocations to their PC-relative
     counterparts when the fixup is marked PC-relative; each falls
     through to the shared "pass the type straight through" list.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types map directly onto the same BFD code.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* For EABI v4 and later a Thumb BLX is emitted as a plain
	 BRANCH23 relocation.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only relocation; it should have been resolved during
	 md_apply_fix and never reach output.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      /* Report a more precise error for a reference to an undefined
	 local label before falling back to the generic message.  */
      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining type is internal-only; name it in the error.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE: type = "NONE"; break;
	  case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
	  case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
	  case BFD_RELOC_ARM_SMC: type = "SMC"; break;
	  case BFD_RELOC_ARM_SWI: type = "SWI"; break;
	  case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
	  case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
	  case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
	  case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default: type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ itself is rewritten as
     a GOTPC relocation with the place's address as addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24406
24407 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24408
24409 void
24410 cons_fix_new_arm (fragS * frag,
24411 int where,
24412 int size,
24413 expressionS * exp,
24414 bfd_reloc_code_real_type reloc)
24415 {
24416 int pcrel = 0;
24417
24418 /* Pick a reloc.
24419 FIXME: @@ Should look at CPU word size. */
24420 switch (size)
24421 {
24422 case 1:
24423 reloc = BFD_RELOC_8;
24424 break;
24425 case 2:
24426 reloc = BFD_RELOC_16;
24427 break;
24428 case 4:
24429 default:
24430 reloc = BFD_RELOC_32;
24431 break;
24432 case 8:
24433 reloc = BFD_RELOC_64;
24434 break;
24435 }
24436
24437 #ifdef TE_PE
24438 if (exp->X_op == O_secrel)
24439 {
24440 exp->X_op = O_symbol;
24441 reloc = BFD_RELOC_32_SECREL;
24442 }
24443 #endif
24444
24445 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24446 }
24447
24448 #if defined (OBJ_COFF)
24449 void
24450 arm_validate_fix (fixS * fixP)
24451 {
24452 /* If the destination of the branch is a defined symbol which does not have
24453 the THUMB_FUNC attribute, then we must be calling a function which has
24454 the (interfacearm) attribute. We look for the Thumb entry point to that
24455 function and change the branch to refer to that function instead. */
24456 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24457 && fixP->fx_addsy != NULL
24458 && S_IS_DEFINED (fixP->fx_addsy)
24459 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24460 {
24461 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24462 }
24463 }
24464 #endif
24465
24466
24467 int
24468 arm_force_relocation (struct fix * fixp)
24469 {
24470 #if defined (OBJ_COFF) && defined (TE_PE)
24471 if (fixp->fx_r_type == BFD_RELOC_RVA)
24472 return 1;
24473 #endif
24474
24475 /* In case we have a call or a branch to a function in ARM ISA mode from
24476 a thumb function or vice-versa force the relocation. These relocations
24477 are cleared off for some cores that might have blx and simple transformations
24478 are possible. */
24479
24480 #ifdef OBJ_ELF
24481 switch (fixp->fx_r_type)
24482 {
24483 case BFD_RELOC_ARM_PCREL_JUMP:
24484 case BFD_RELOC_ARM_PCREL_CALL:
24485 case BFD_RELOC_THUMB_PCREL_BLX:
24486 if (THUMB_IS_FUNC (fixp->fx_addsy))
24487 return 1;
24488 break;
24489
24490 case BFD_RELOC_ARM_PCREL_BLX:
24491 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24492 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24493 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24494 if (ARM_IS_FUNC (fixp->fx_addsy))
24495 return 1;
24496 break;
24497
24498 default:
24499 break;
24500 }
24501 #endif
24502
24503 /* Resolve these relocations even if the symbol is extern or weak.
24504 Technically this is probably wrong due to symbol preemption.
24505 In practice these relocations do not have enough range to be useful
24506 at dynamic link time, and some code (e.g. in the Linux kernel)
24507 expects these references to be resolved. */
24508 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24509 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24510 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24511 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24512 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24513 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24514 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24515 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24516 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24517 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24518 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24519 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24520 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24521 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24522 return 0;
24523
24524 /* Always leave these relocations for the linker. */
24525 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24526 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24527 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24528 return 1;
24529
24530 /* Always generate relocations against function symbols. */
24531 if (fixp->fx_r_type == BFD_RELOC_32
24532 && fixp->fx_addsy
24533 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24534 return 1;
24535
24536 return generic_force_reloc (fixp);
24537 }
24538
24539 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24540 /* Relocations against function names must be left unadjusted,
24541 so that the linker can use this information to generate interworking
24542 stubs. The MIPS version of this function
24543 also prevents relocations that are mips-16 specific, but I do not
24544 know why it does this.
24545
24546 FIXME:
24547 There is one other problem that ought to be addressed here, but
24548 which currently is not: Taking the address of a label (rather
24549 than a function) and then later jumping to that address. Such
24550 addresses also ought to have their bottom bit set (assuming that
24551 they reside in Thumb code), but at the moment they will not. */
24552
24553 bfd_boolean
24554 arm_fix_adjustable (fixS * fixP)
24555 {
24556 if (fixP->fx_addsy == NULL)
24557 return 1;
24558
24559 /* Preserve relocations against symbols with function type. */
24560 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24561 return FALSE;
24562
24563 if (THUMB_IS_FUNC (fixP->fx_addsy)
24564 && fixP->fx_subsy == NULL)
24565 return FALSE;
24566
24567 /* We need the symbol name for the VTABLE entries. */
24568 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24569 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24570 return FALSE;
24571
24572 /* Don't allow symbols to be discarded on GOT related relocs. */
24573 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24574 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24575 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24576 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24577 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24578 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24579 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24580 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24581 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24582 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24583 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24584 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24585 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24586 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24587 return FALSE;
24588
24589 /* Similarly for group relocations. */
24590 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24591 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24592 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24593 return FALSE;
24594
24595 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24596 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24597 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24598 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24599 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24600 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24601 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24602 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24603 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24604 return FALSE;
24605
24606 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24607 offsets, so keep these symbols. */
24608 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24609 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24610 return FALSE;
24611
24612 return TRUE;
24613 }
24614 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24615
24616 #ifdef OBJ_ELF
24617 const char *
24618 elf32_arm_target_format (void)
24619 {
24620 #ifdef TE_SYMBIAN
24621 return (target_big_endian
24622 ? "elf32-bigarm-symbian"
24623 : "elf32-littlearm-symbian");
24624 #elif defined (TE_VXWORKS)
24625 return (target_big_endian
24626 ? "elf32-bigarm-vxworks"
24627 : "elf32-littlearm-vxworks");
24628 #elif defined (TE_NACL)
24629 return (target_big_endian
24630 ? "elf32-bigarm-nacl"
24631 : "elf32-littlearm-nacl");
24632 #else
24633 if (target_big_endian)
24634 return "elf32-bigarm";
24635 else
24636 return "elf32-littlearm";
24637 #endif
24638 }
24639
/* Per-symbol hook run while writing out the ELF symbol table.  Simply
   forwards to the generic ELF frobber; PUNTP is the out-flag managed by
   elf_frob_symbol (presumably signalling that the symbol should be
   dropped — see obj-elf for the exact contract).  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
24646 #endif
24647
24648 /* MD interface: Finalization. */
24649
24650 void
24651 arm_cleanup (void)
24652 {
24653 literal_pool * pool;
24654
24655 /* Ensure that all the IT blocks are properly closed. */
24656 check_it_blocks_finished ();
24657
24658 for (pool = list_of_pools; pool; pool = pool->next)
24659 {
24660 /* Put it at the end of the relevant section. */
24661 subseg_set (pool->section, pool->sub_section);
24662 #ifdef OBJ_ELF
24663 arm_elf_change_section ();
24664 #endif
24665 s_ltorg (0);
24666 }
24667 }
24668
24669 #ifdef OBJ_ELF
24670 /* Remove any excess mapping symbols generated for alignment frags in
24671 SEC. We may have created a mapping symbol before a zero byte
24672 alignment; remove it if there's a mapping symbol after the
24673 alignment. */
/* Walk every frag of SEC looking for a mapping symbol that sits exactly
   at the end of its frag; such a symbol is redundant (and is removed)
   when the following frag starts with its own mapping symbol, or when
   it is at the very end of the section.  Called for each section via
   bfd_map_over_sections from arm_adjust_symtab.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas did not populate.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide the symbol's fate.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24734 #endif
24735
24736 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24737 ARM ones. */
24738
/* Adjust the symbol table before output so Thumb symbols are marked as
   distinct from ARM ones.  COFF encodes this in the storage class;
   ELF encodes it in st_target_internal / st_info.  Only one of the two
   #ifdef arms is compiled into any given build.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map the ordinary storage class
	       onto its Thumb counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get a marker in the COFF aux flags.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24817
24818 /* MD interface: Initialization. */
24819
24820 static void
24821 set_constant_flonums (void)
24822 {
24823 int i;
24824
24825 for (i = 0; i < NUM_FLOAT_VALS; i++)
24826 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24827 abort ();
24828 }
24829
24830 /* Auto-select Thumb mode if it's the only available instruction set for the
24831 given architecture. */
24832
24833 static void
24834 autoselect_thumb_from_cpu_variant (void)
24835 {
24836 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24837 opcode_select (16);
24838 }
24839
/* MD-interface initialization hook.  Builds the backend hash tables
   (opcodes, conditions, shifts, PSR names, registers, relocs, barrier
   options), resolves the CPU/FPU selection from the command line,
   records object-file flags and finally the BFD machine number.  */
void
md_begin (void)
{
  unsigned	mach;
  unsigned int	i;

  /* Allocate all the lookup tables up front; any failure is fatal.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each hash table from its static description table.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU defaults when nothing above decided.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Tests run from most to least
     specific feature.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25066
25067 /* Command line processing. */
25068
25069 /* md_parse_option
25070 Invocation line includes a switch not recognized by the base assembler.
25071 See if it's a processor-specific option.
25072
25073 This routine is somewhat complicated by the need for backwards
25074 compatibility (since older releases of gcc can't be changed).
25075 The new options try to make the interface as compatible as
25076 possible with GCC.
25077
25078 New options (supported) are:
25079
25080 -mcpu=<cpu name> Assemble for selected processor
25081 -march=<architecture name> Assemble for selected architecture
25082 -mfpu=<fpu architecture> Assemble for selected FPU.
25083 -EB/-mbig-endian Big-endian
25084 -EL/-mlittle-endian Little-endian
25085 -k Generate PIC code
25086 -mthumb Start in Thumb mode
25087 -mthumb-interwork Code supports ARM/Thumb interworking
25088
25089 -m[no-]warn-deprecated Warn about deprecated features
25090 -m[no-]warn-syms Warn when symbols match instructions
25091
25092 For now we will also provide support for:
25093
25094 -mapcs-32 32-bit Program counter
25095 -mapcs-26 26-bit Program counter
   -mapcs-float              Floats passed in FP registers
25097 -mapcs-reentrant Reentrant code
25098 -matpcs
25099 (sometime these will probably be replaced with -mapcs=<list of options>
25100 and -matpcs=<list of options>)
25101
   The remaining options are only supported for backwards compatibility.
25103 Cpu variants, the arm part is optional:
25104 -m[arm]1 Currently not supported.
25105 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25106 -m[arm]3 Arm 3 processor
25107 -m[arm]6[xx], Arm 6 processors
25108 -m[arm]7[xx][t][[d]m] Arm 7 processors
25109 -m[arm]8[10] Arm 8 processors
25110 -m[arm]9[20][tdmi] Arm 9 processors
25111 -mstrongarm[110[0]] StrongARM processors
25112 -mxscale XScale processors
25113 -m[arm]v[2345[t[e]]] Arm architectures
25114 -mall All (except the ARM1)
25115 FP variants:
25116 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25117 -mfpe-old (No float load/store multiples)
25118 -mvfpxd VFP Single precision
25119 -mvfp All VFP
25120 -mno-fpu Disable all floating point instructions
25121
25122 The following CPU names are recognized:
25123 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25124 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25125 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25126 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25127 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25128 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25129 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25130
25131 */
25132
/* Short options: -m<arg> (CPU/architecture selection) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* Only define the endianness switch(es) the target can actually use:
   both for bi-endian builds, otherwise just the configured one.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options handled directly by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
25161
/* Table entry for a simple boolean/flag command line option: matching
   OPTION sets *VAR to VALUE.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int        *var;		/* Variable to change.	*/
  int	      value;		/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25170
25171 struct arm_option_table arm_opts[] =
25172 {
25173 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25174 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25175 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25176 &support_interwork, 1, NULL},
25177 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25178 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25179 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25180 1, NULL},
25181 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25182 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25183 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25184 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25185 NULL},
25186
25187 /* These are recognized by the assembler, but have no affect on code. */
25188 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25189 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25190
25191 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25192 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25193 &warn_on_deprecated, 0, NULL},
25194 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25195 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25196 {NULL, NULL, NULL, 0, NULL}
25197 };
25198
/* Table entry for a deprecated CPU/FPU selection option: matching
   OPTION points *VAR at VALUE and (when DEPRECATED is non-null) warns
   the user towards the modern -mcpu=/-march=/-mfpu= spelling.  */
struct arm_legacy_option_table
{
  const char *option;			/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.	*/
  const arm_feature_set	  value;	/* What to change it to.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
25206
/* Legacy -m<cpu>/-m<arch>/-m<fpu> spellings kept only for backwards
   compatibility; each entry redirects the user to the modern option in
   its deprecation message.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25319
/* One entry in the -mcpu= lookup table (ARM_CPUS below).  */
struct arm_cpu_option_table
{
  /* CPU name as written on the command line, e.g. "cortex-a9".  */
  const char *name;
  /* strlen (name), pre-computed so lookups avoid repeated strlen calls.  */
  size_t name_len;
  /* Feature bits enabled by selecting this CPU.  */
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25332
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Helper: fills in name_len automatically from the literal.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A73"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R8"),
  ARM_CPU_OPT ("cortex-m33",	ARM_ARCH_V8M_MAIN_DSP,
						 FPU_NONE,	  "Cortex-M33"),
  ARM_CPU_OPT ("cortex-m23",	ARM_ARCH_V8M_BASE,
						 FPU_NONE,	  "Cortex-M23"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("falkor",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "Falkor"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  /* Sentinel: NAME == NULL terminates the search in arm_parse_cpu.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25512
/* One entry in the -march= lookup table (ARM_ARCHS below).  */
struct arm_arch_option_table
{
  /* Architecture name as written on the command line, e.g. "armv8-a".  */
  const char *name;
  /* strlen (name), pre-computed to speed up matching.  */
  size_t name_len;
  /* Feature bits enabled by selecting this architecture.  */
  const arm_feature_set value;
  /* FPU assumed unless the user explicitly sets -mfpu=.  */
  const arm_feature_set default_fpu;
};
25520
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Helper: fills in name_len automatically from the literal.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.3-a",	ARM_ARCH_V8_3A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: NAME == NULL terminates the search in arm_parse_arch.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25583
/* ISA extensions in the co-processor and main instruction set space.  */
struct arm_option_extension_value_table
{
  /* Extension name as written after '+' on the command line, e.g. "crc".  */
  const char *name;
  /* strlen (name), pre-computed to speed up matching.  */
  size_t name_len;
  /* Feature bits ORed in when the extension is enabled.  */
  const arm_feature_set merge_value;
  /* Feature bits cleared when the extension is disabled ("no" prefix).
     May differ from MERGE_VALUE when enabling implies extra baseline bits.  */
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
25596
/* The following table must be in alphabetical order with a NULL last entry.
   (arm_parse_extension relies on that ordering to enforce "add before
   remove" and to report out-of-order extension lists.)  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  /* Sentinel: NAME == NULL terminates the search in arm_parse_extension.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
25658
/* ISA floating-point and Advanced SIMD extensions.  */
/* One entry in the -mfpu= lookup table (ARM_FPUS below).  */
struct arm_option_fpu_value_table
{
  /* FPU name as written on the command line, e.g. "neon-vfpv4".  */
  const char *name;
  /* Coprocessor feature bits selected by this FPU name.  */
  const arm_feature_set value;
};
25665
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel: NAME == NULL terminates the search in arm_parse_fpu.  */
  {NULL,		ARM_ARCH_NONE}
};
25715
/* Generic name -> integer mapping, used for the float-ABI and EABI
   option tables below.  */
struct arm_option_value_table
{
  /* Option value as written on the command line.  */
  const char *name;
  /* Integer constant stored into the corresponding global when matched.  */
  long value;
};
25721
/* Values accepted by -mfloat-abi=, consumed by arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25729
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Values accepted by -meabi=, consumed by arm_parse_eabi.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
25740
/* Multi-character option with a sub-argument, e.g. "-mcpu=cortex-a9".
   Matched by prefix in md_parse_option; FUNC parses the text that
   follows the matched prefix.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.	*/
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25748
/* Parse a '+'-separated list of architectural extensions following a CPU or
   architecture name, e.g. "+crc+nocrypto".  STR points at the first '+'.
   On entry *OPT_P is the base feature set; on success it is replaced with a
   freshly allocated copy that has the requested extensions merged in or
   cleared.  Returns FALSE (after as_bad) on any parse error.  Note the new
   feature set is published into *OPT_P before parsing, so it remains owned
   by the caller even on failure.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = XNEW (arm_feature_set);

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name only.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches to removal mode; restart the table scan
	 since removals may name any (alphabetical) position.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      /* Once we have seen a "no" extension, plain additions are
		 no longer permitted.  */
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  OPT is
	 not reset here: the table is alphabetical, so continuing from the
	 last match enforces alphabetical ordering on the command line.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
25881
25882 static bfd_boolean
25883 arm_parse_cpu (const char *str)
25884 {
25885 const struct arm_cpu_option_table *opt;
25886 const char *ext = strchr (str, '+');
25887 size_t len;
25888
25889 if (ext != NULL)
25890 len = ext - str;
25891 else
25892 len = strlen (str);
25893
25894 if (len == 0)
25895 {
25896 as_bad (_("missing cpu name `%s'"), str);
25897 return FALSE;
25898 }
25899
25900 for (opt = arm_cpus; opt->name != NULL; opt++)
25901 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25902 {
25903 mcpu_cpu_opt = &opt->value;
25904 mcpu_fpu_opt = &opt->default_fpu;
25905 if (opt->canonical_name)
25906 {
25907 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25908 strcpy (selected_cpu_name, opt->canonical_name);
25909 }
25910 else
25911 {
25912 size_t i;
25913
25914 if (len >= sizeof selected_cpu_name)
25915 len = (sizeof selected_cpu_name) - 1;
25916
25917 for (i = 0; i < len; i++)
25918 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25919 selected_cpu_name[i] = 0;
25920 }
25921
25922 if (ext != NULL)
25923 return arm_parse_extension (ext, &mcpu_cpu_opt);
25924
25925 return TRUE;
25926 }
25927
25928 as_bad (_("unknown cpu `%s'"), str);
25929 return FALSE;
25930 }
25931
25932 static bfd_boolean
25933 arm_parse_arch (const char *str)
25934 {
25935 const struct arm_arch_option_table *opt;
25936 const char *ext = strchr (str, '+');
25937 size_t len;
25938
25939 if (ext != NULL)
25940 len = ext - str;
25941 else
25942 len = strlen (str);
25943
25944 if (len == 0)
25945 {
25946 as_bad (_("missing architecture name `%s'"), str);
25947 return FALSE;
25948 }
25949
25950 for (opt = arm_archs; opt->name != NULL; opt++)
25951 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25952 {
25953 march_cpu_opt = &opt->value;
25954 march_fpu_opt = &opt->default_fpu;
25955 strcpy (selected_cpu_name, opt->name);
25956
25957 if (ext != NULL)
25958 return arm_parse_extension (ext, &march_cpu_opt);
25959
25960 return TRUE;
25961 }
25962
25963 as_bad (_("unknown architecture `%s'\n"), str);
25964 return FALSE;
25965 }
25966
25967 static bfd_boolean
25968 arm_parse_fpu (const char * str)
25969 {
25970 const struct arm_option_fpu_value_table * opt;
25971
25972 for (opt = arm_fpus; opt->name != NULL; opt++)
25973 if (streq (opt->name, str))
25974 {
25975 mfpu_opt = &opt->value;
25976 return TRUE;
25977 }
25978
25979 as_bad (_("unknown floating point format `%s'\n"), str);
25980 return FALSE;
25981 }
25982
25983 static bfd_boolean
25984 arm_parse_float_abi (const char * str)
25985 {
25986 const struct arm_option_value_table * opt;
25987
25988 for (opt = arm_float_abis; opt->name != NULL; opt++)
25989 if (streq (opt->name, str))
25990 {
25991 mfloat_abi_opt = opt->value;
25992 return TRUE;
25993 }
25994
25995 as_bad (_("unknown floating point abi `%s'\n"), str);
25996 return FALSE;
25997 }
25998
#ifdef OBJ_ELF
/* Handle -meabi=STR: look STR up in the ARM_EABIS table and record the
   matching ELF flag value in MEABI_FLAGS.  Returns FALSE (after as_bad)
   when STR names no known EABI version.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *eabi;

  for (eabi = arm_eabis; eabi->name != NULL; eabi++)
    {
      if (streq (eabi->name, str))
	{
	  meabi_flags = eabi->value;
	  return TRUE;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
26015
26016 static bfd_boolean
26017 arm_parse_it_mode (const char * str)
26018 {
26019 bfd_boolean ret = TRUE;
26020
26021 if (streq ("arm", str))
26022 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26023 else if (streq ("thumb", str))
26024 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26025 else if (streq ("always", str))
26026 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26027 else if (streq ("never", str))
26028 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26029 else
26030 {
26031 as_bad (_("unknown implicit IT mode `%s', should be "\
26032 "arm, thumb, always, or never."), str);
26033 ret = FALSE;
26034 }
26035
26036 return ret;
26037 }
26038
26039 static bfd_boolean
26040 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26041 {
26042 codecomposer_syntax = TRUE;
26043 arm_comment_chars[0] = ';';
26044 arm_line_separator_chars[0] = 0;
26045 return TRUE;
26046 }
26047
/* Long ("-mxxx=") command-line options; matched by prefix in
   md_parse_option, which then hands the sub-argument to FUNC.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
26068
/* gas target hook: parse one target-specific command-line option.  C is the
   option character, ARG the remainder of the option text (or NULL).  Tries,
   in order, the single-value ARM_OPTS table, the deprecated ARM_LEGACY_OPTS
   table, and the prefix-matched ARM_LONG_OPTS table.  Returns 1 when the
   option was consumed, 0 when it is not an ARM option.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple options: exact match on the whole option text.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: store a pointer to the table's feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  The table
	     entry includes the leading option character (e.g. "mcpu=")
	     while ARG does not, hence the +1 / -1 offsets.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26159
/* Print a description of the ARM specific assembler options to the
   stream FP, in response to "as --help" / "as --target-help".
   NOTE(review): the help-string spacing here may have been mangled by
   whitespace collapsing — confirm column alignment against the
   upstream sources before relying on it.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Table-driven options; entries with a NULL help string are
     deliberately undocumented.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Long options that take an argument (eg. -mcpu=).  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
-EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
-EL assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
--fix-v4bx Allow BX in ARMv4 code\n"));
}
26189
26190
26191 #ifdef OBJ_ELF
typedef struct
{
  /* EABI Tag_CPU_arch value for this architecture.  */
  int val;
  /* Feature set provided by this architecture.  */
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  Note the
   deliberately out-of-order entries (V6K after V6Z's slot, V6T2 after the
   V6-M entries, V8A before the V8-M entries) — do not re-sort.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {9, ARM_ARCH_V6K},
  {7, ARM_ARCH_V6Z},
  {11, ARM_ARCH_V6M},
  {12, ARM_ARCH_V6SM},
  {8, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7VE},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {14, ARM_ARCH_V8A},
  {16, ARM_ARCH_V8M_BASE},
  {17, ARM_ARCH_V8M_MAIN},
  /* Sentinel: val == 0 terminates the table.  */
  {0, ARM_ARCH_NONE}
};
26223
26224 /* Set an attribute if it has not already been set by the user. */
26225 static void
26226 aeabi_set_attribute_int (int tag, int value)
26227 {
26228 if (tag < 1
26229 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26230 || !attributes_set_explicitly[tag])
26231 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26232 }
26233
26234 static void
26235 aeabi_set_attribute_string (int tag, const char *value)
26236 {
26237 if (tag < 1
26238 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26239 || !attributes_set_explicitly[tag])
26240 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26241 }
26242
/* Set the public EABI object attributes.  Deduces the Tag_CPU_arch
   value (and friends) from the union of the features the user asked
   for and the features the assembled instructions actually used, then
   records each attribute via aeabi_set_attribute_{int,string}, which
   respect any values the user set explicitly.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set arm_arch = ARM_ARCH_NONE;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Any ARM-state code implies at least the v1 base ISA.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  /* Any Thumb-state code implies at least the v4T Thumb ISA.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk the table; the last entry that contributes a not-yet-seen
     feature bit determines both ARCH (the attribute value) and
     ARM_ARCH (that architecture's full feature set).  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  arm_arch = p->flags;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    {
      arch = TAG_CPU_ARCH_V7E_M;
      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
    }

  /* If anything beyond the v8-M baseline feature set is in use, the
     architecture must really be v8-M mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8M_MAIN;
      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
    }

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8;
      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names ("armv...") are reported upper-cased with
	 the "arm" prefix stripped.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    /* No profile deduced; PROFILE is also consulted for Tag_DIV_use
       below.  */
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
    {
      arm_feature_set ext;

      /* DSP instructions not in architecture.  */
      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
	aeabi_set_attribute_int (Tag_DSP_extension, 1);
    }

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Most-capable FPU first; several branches also mark
     half-precision as optional for Tag_VFP_HP_extension below.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 = Security Extensions, bit 1 =
     Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26490
26491 /* Add the default contents for the .ARM.attributes section. */
26492 void
26493 arm_md_end (void)
26494 {
26495 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26496 return;
26497
26498 aeabi_set_public_attributes ();
26499 }
26500 #endif /* OBJ_ELF */
26501
26502
26503 /* Parse a .cpu directive. */
26504
26505 static void
26506 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26507 {
26508 const struct arm_cpu_option_table *opt;
26509 char *name;
26510 char saved_char;
26511
26512 name = input_line_pointer;
26513 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26514 input_line_pointer++;
26515 saved_char = *input_line_pointer;
26516 *input_line_pointer = 0;
26517
26518 /* Skip the first "all" entry. */
26519 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26520 if (streq (opt->name, name))
26521 {
26522 mcpu_cpu_opt = &opt->value;
26523 selected_cpu = opt->value;
26524 if (opt->canonical_name)
26525 strcpy (selected_cpu_name, opt->canonical_name);
26526 else
26527 {
26528 int i;
26529 for (i = 0; opt->name[i]; i++)
26530 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26531
26532 selected_cpu_name[i] = 0;
26533 }
26534 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26535 *input_line_pointer = saved_char;
26536 demand_empty_rest_of_line ();
26537 return;
26538 }
26539 as_bad (_("unknown cpu `%s'"), name);
26540 *input_line_pointer = saved_char;
26541 ignore_rest_of_line ();
26542 }
26543
26544
26545 /* Parse a .arch directive. */
26546
26547 static void
26548 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26549 {
26550 const struct arm_arch_option_table *opt;
26551 char saved_char;
26552 char *name;
26553
26554 name = input_line_pointer;
26555 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26556 input_line_pointer++;
26557 saved_char = *input_line_pointer;
26558 *input_line_pointer = 0;
26559
26560 /* Skip the first "all" entry. */
26561 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26562 if (streq (opt->name, name))
26563 {
26564 mcpu_cpu_opt = &opt->value;
26565 selected_cpu = opt->value;
26566 strcpy (selected_cpu_name, opt->name);
26567 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26568 *input_line_pointer = saved_char;
26569 demand_empty_rest_of_line ();
26570 return;
26571 }
26572
26573 as_bad (_("unknown architecture `%s'\n"), name);
26574 *input_line_pointer = saved_char;
26575 ignore_rest_of_line ();
26576 }
26577
26578
26579 /* Parse a .object_arch directive. */
26580
26581 static void
26582 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26583 {
26584 const struct arm_arch_option_table *opt;
26585 char saved_char;
26586 char *name;
26587
26588 name = input_line_pointer;
26589 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26590 input_line_pointer++;
26591 saved_char = *input_line_pointer;
26592 *input_line_pointer = 0;
26593
26594 /* Skip the first "all" entry. */
26595 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26596 if (streq (opt->name, name))
26597 {
26598 object_arch = &opt->value;
26599 *input_line_pointer = saved_char;
26600 demand_empty_rest_of_line ();
26601 return;
26602 }
26603
26604 as_bad (_("unknown architecture `%s'\n"), name);
26605 *input_line_pointer = saved_char;
26606 ignore_rest_of_line ();
26607 }
26608
26609 /* Parse a .arch_extension directive. */
26610
26611 static void
26612 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26613 {
26614 const struct arm_option_extension_value_table *opt;
26615 const arm_feature_set arm_any = ARM_ANY;
26616 char saved_char;
26617 char *name;
26618 int adding_value = 1;
26619
26620 name = input_line_pointer;
26621 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26622 input_line_pointer++;
26623 saved_char = *input_line_pointer;
26624 *input_line_pointer = 0;
26625
26626 if (strlen (name) >= 2
26627 && strncmp (name, "no", 2) == 0)
26628 {
26629 adding_value = 0;
26630 name += 2;
26631 }
26632
26633 for (opt = arm_extensions; opt->name != NULL; opt++)
26634 if (streq (opt->name, name))
26635 {
26636 int i, nb_allowed_archs =
26637 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
26638 for (i = 0; i < nb_allowed_archs; i++)
26639 {
26640 /* Empty entry. */
26641 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26642 continue;
26643 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26644 break;
26645 }
26646
26647 if (i == nb_allowed_archs)
26648 {
26649 as_bad (_("architectural extension `%s' is not allowed for the "
26650 "current base architecture"), name);
26651 break;
26652 }
26653
26654 if (adding_value)
26655 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26656 opt->merge_value);
26657 else
26658 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26659
26660 mcpu_cpu_opt = &selected_cpu;
26661 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26662 *input_line_pointer = saved_char;
26663 demand_empty_rest_of_line ();
26664 return;
26665 }
26666
26667 if (opt->name == NULL)
26668 as_bad (_("unknown architecture extension `%s'\n"), name);
26669
26670 *input_line_pointer = saved_char;
26671 ignore_rest_of_line ();
26672 }
26673
26674 /* Parse a .fpu directive. */
26675
26676 static void
26677 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26678 {
26679 const struct arm_option_fpu_value_table *opt;
26680 char saved_char;
26681 char *name;
26682
26683 name = input_line_pointer;
26684 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26685 input_line_pointer++;
26686 saved_char = *input_line_pointer;
26687 *input_line_pointer = 0;
26688
26689 for (opt = arm_fpus; opt->name != NULL; opt++)
26690 if (streq (opt->name, name))
26691 {
26692 mfpu_opt = &opt->value;
26693 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26694 *input_line_pointer = saved_char;
26695 demand_empty_rest_of_line ();
26696 return;
26697 }
26698
26699 as_bad (_("unknown floating point format `%s'\n"), name);
26700 *input_line_pointer = saved_char;
26701 ignore_rest_of_line ();
26702 }
26703
/* Copy symbol information.  Propagates the ARM-specific per-symbol
   flag bits from SRC to DEST; presumably invoked through the
   target's symbol-attribute-copy hook — see tc-arm.h to confirm.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26711
26712 #ifdef OBJ_ELF
26713 /* Given a symbolic attribute NAME, return the proper integer value.
26714 Returns -1 if the attribute is not known. */
26715
26716 int
26717 arm_convert_symbolic_attribute (const char *name)
26718 {
26719 static const struct
26720 {
26721 const char * name;
26722 const int tag;
26723 }
26724 attribute_table[] =
26725 {
26726 /* When you modify this table you should
26727 also modify the list in doc/c-arm.texi. */
26728 #define T(tag) {#tag, tag}
26729 T (Tag_CPU_raw_name),
26730 T (Tag_CPU_name),
26731 T (Tag_CPU_arch),
26732 T (Tag_CPU_arch_profile),
26733 T (Tag_ARM_ISA_use),
26734 T (Tag_THUMB_ISA_use),
26735 T (Tag_FP_arch),
26736 T (Tag_VFP_arch),
26737 T (Tag_WMMX_arch),
26738 T (Tag_Advanced_SIMD_arch),
26739 T (Tag_PCS_config),
26740 T (Tag_ABI_PCS_R9_use),
26741 T (Tag_ABI_PCS_RW_data),
26742 T (Tag_ABI_PCS_RO_data),
26743 T (Tag_ABI_PCS_GOT_use),
26744 T (Tag_ABI_PCS_wchar_t),
26745 T (Tag_ABI_FP_rounding),
26746 T (Tag_ABI_FP_denormal),
26747 T (Tag_ABI_FP_exceptions),
26748 T (Tag_ABI_FP_user_exceptions),
26749 T (Tag_ABI_FP_number_model),
26750 T (Tag_ABI_align_needed),
26751 T (Tag_ABI_align8_needed),
26752 T (Tag_ABI_align_preserved),
26753 T (Tag_ABI_align8_preserved),
26754 T (Tag_ABI_enum_size),
26755 T (Tag_ABI_HardFP_use),
26756 T (Tag_ABI_VFP_args),
26757 T (Tag_ABI_WMMX_args),
26758 T (Tag_ABI_optimization_goals),
26759 T (Tag_ABI_FP_optimization_goals),
26760 T (Tag_compatibility),
26761 T (Tag_CPU_unaligned_access),
26762 T (Tag_FP_HP_extension),
26763 T (Tag_VFP_HP_extension),
26764 T (Tag_ABI_FP_16bit_format),
26765 T (Tag_MPextension_use),
26766 T (Tag_DIV_use),
26767 T (Tag_nodefaults),
26768 T (Tag_also_compatible_with),
26769 T (Tag_conformance),
26770 T (Tag_T2EE_use),
26771 T (Tag_Virtualization_use),
26772 T (Tag_DSP_extension),
26773 /* We deliberately do not include Tag_MPextension_use_legacy. */
26774 #undef T
26775 };
26776 unsigned int i;
26777
26778 if (name == NULL)
26779 return -1;
26780
26781 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
26782 if (streq (name, attribute_table[i].name))
26783 return attribute_table[i].tag;
26784
26785 return -1;
26786 }
26787
26788
26789 /* Apply sym value for relocations only in the case that they are for
26790 local symbols in the same segment as the fixup and you have the
26791 respective architectural feature for blx and simple switches. */
26792 int
26793 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26794 {
26795 if (fixP->fx_addsy
26796 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26797 /* PR 17444: If the local symbol is in a different section then a reloc
26798 will always be generated for it, so applying the symbol value now
26799 will result in a double offset being stored in the relocation. */
26800 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26801 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26802 {
26803 switch (fixP->fx_r_type)
26804 {
26805 case BFD_RELOC_ARM_PCREL_BLX:
26806 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26807 if (ARM_IS_FUNC (fixP->fx_addsy))
26808 return 1;
26809 break;
26810
26811 case BFD_RELOC_ARM_PCREL_CALL:
26812 case BFD_RELOC_THUMB_PCREL_BLX:
26813 if (THUMB_IS_FUNC (fixP->fx_addsy))
26814 return 1;
26815 break;
26816
26817 default:
26818 break;
26819 }
26820
26821 }
26822 return 0;
26823 }
26824 #endif /* OBJ_ELF */
This page took 0.720985 seconds and 4 git commands to generate.