[ARM][gas] Add support for Cortex-A32
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
#ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  There is a single instance,
   so only one function's unwind table entry is built at a time.  */

static struct
{
  /* Symbol marking the start of the function being described.  */
  symbolS *	  proc_start;
  /* Symbol for this function's entry in the unwind index table.  */
  symbolS *	  table_entry;
  /* Personality routine symbol, when one is given explicitly.  */
  symbolS *	  personality_routine;
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int		  opcode_count;
  /* Allocated size of the OPCODES buffer, in bytes.  */
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.	 */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;

#endif /* OBJ_ELF */
79
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parse failed and no alternative interpretation should be tried.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

/* Float ABI selected via -mfloat-abi=... (see mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};

/* Types of processor to assemble for.	*/
#ifndef CPU_DEFAULT
/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.

   If you have a target that requires a default CPU option then the you
   should define CPU_DEFAULT here.  */
#endif

/* Default FPU architecture when none is given on the command line,
   chosen per target OS for backwards compatibility.  */
#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */
123
#define streq(a, b)	      (strcmp (a, b) == 0)

/* Feature set of the CPU/FPU currently being assembled for.  */
static arm_feature_set cpu_variant;
/* Features actually used by the assembled code, per instruction set.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

#ifdef CPU_DEFAULT
static const arm_feature_set cpu_default = CPU_DEFAULT;
#endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
212 static const arm_feature_set arm_ext_v6t2_v8m =
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics =
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
217 static const arm_feature_set arm_ext_v8_2 =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16 =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
222
223 static const arm_feature_set arm_arch_any = ARM_ANY;
224 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
226 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
227 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
228
/* Co-processor and FPU extension feature sets (iWMMXt, XScale, Maverick,
   FPA, VFP, Neon, crypto, CRC).  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);

/* Float ABI selected with -mfloat-abi; -1 means not specified.  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.
   NOTE(review): fixed at 20 bytes -- keep in sync with the longest name
   in the arm_cpus table.  */
static char selected_cpu_name[20];

extern FLONUM_TYPE generic_floating_point_number;
283
284 /* Return if no cpu was selected on command-line. */
285 static bfd_boolean
286 no_cpu_selected (void)
287 {
288 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
289 }
290
#ifdef OBJ_ELF
/* EABI version flags recorded in the ELF header; overridable with
   EABI_DEFAULT at build time.  */
# ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif

/* Nonzero for each object attribute that was set explicitly by the user
   (as opposed to being inferred).  */
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
299
300 bfd_boolean
301 arm_is_eabi (void)
302 {
303 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
304 }
305 #endif
306
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;
#endif

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Default: generate implicit IT blocks for ARM-mode conditional code.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
331
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";

/* Element types that may appear in a Neon type suffix (e.g. .s16).  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed Neon type element: kind plus bit width.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

#define NEON_MAX_TYPE_ELS 4

/* A full parsed Neon type suffix: up to NEON_MAX_TYPE_ELS elements.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

/* Where an instruction may legally sit relative to an IT block.  */
enum it_instruction_type
{
   OUTSIDE_IT_INSN,
   INSIDE_IT_INSN,
   INSIDE_IT_LAST_INSN,
   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			      if inside, should be the last one.  */
   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			      i.e. BKPT and NOP.  */
   IT_INSN                 /* The IT insn has been parsed.  */
};

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
402
/* State of the instruction currently being parsed and encoded.  There is
   one global instance, "inst", below.  */
struct arm_it
{
  /* Error message to report, or NULL on success.  */
  const char *	error;
  /* ARM-format encoding being built.  */
  unsigned long instruction;
  /* Size in bytes of the encoded instruction.  */
  int		size;
  /* Size required by a .n/.w suffix, or 0 if unconstrained.  */
  int		size_req;
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long	relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

/* FPA-encodable float constants; terminated by a null pointer.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.	*/
#define MAX_LITTLENUMS 6

/* Pre-converted littlenum forms of the fp_const strings.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

#define FAIL	(-1)
#define SUCCESS (0)

/* FPA precision suffixes.  */
#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
486
/* Condition-code name -> encoding, for the condition hash table.  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};

#define COND_ALWAYS 0xE

/* PSR name -> field mask, for the psr hash tables.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

/* Barrier option name -> encoding, plus the architecture that
   provides it.  */
struct asm_barrier_opt
{
  const char *	  template_name;
  unsigned long   value;
  const arm_feature_set arch;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)

/* Relocation-specifier name -> BFD reloc code.  */
struct reloc_entry
{
  char *                    name;
  bfd_reloc_code_real_type  reloc;
};

/* Which field of a VFP instruction a register number goes into.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* VFP load/store-multiple addressing variants.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn/.qn:
   an optional element type and/or scalar index.  */
struct neon_typed_alias
{
  unsigned char        defined;
  unsigned char        index;
  struct neon_type_el  eltype;
};
544
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *               name;
  unsigned int               number;
  unsigned char              type;    /* An enum arm_reg_type value.  */
  unsigned char              builtin; /* Nonzero for built-in (non-alias) names.  */
  struct neon_typed_alias *  neon;
};

/* Diagnostics used when we don't get a register of the expected type.
   NOTE(review): indexed by enum arm_reg_type and must stay in the same
   order; there is deliberately no entry for REG_TYPE_RNB (21 messages
   for 22 enumerators) -- confirm no diagnostic is ever emitted for it.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
611
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12	12
#define REG_SP	13
#define REG_LR	14
#define REG_PC	15

/* ARM instructions take 4bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE	4

/* One entry in the opcode hash table: mnemonic template plus the data
   and callbacks needed to encode it in either instruction set.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.	 */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};

/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/poping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND	0
#define OPCODE_EOR	1
#define OPCODE_SUB	2
#define OPCODE_RSB	3
#define OPCODE_ADD	4
#define OPCODE_ADC	5
#define OPCODE_SBC	6
#define OPCODE_RSC	7
#define OPCODE_TST	8
#define OPCODE_TEQ	9
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
767
/* Diagnostic strings shared by the ARM and Thumb encoders.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* FIX: removed a stray trailing semicolon from this definition.  With the
   semicolon the macro did not expand to an expression, so it could only be
   used where a complete statement was legal and silently introduced an
   empty statement there.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
789
/* Hash tables mapping mnemonics, condition names, shift names, PSR
   names, register names, relocation specifiers and barrier options to
   their descriptors.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	         next_free_entry;
  unsigned int	         id;
  symbolS *	         symbol;
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source locations of the entries, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State machine for the .asmfunc/.endasmfunc directives.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

/* Current IT-block state: per-segment for ELF, a single global
   otherwise.  */
#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
847
848 static inline int
849 now_it_compatible (int cond)
850 {
851 return (cond & ~1) == (now_it.cc & ~1);
852 }
853
854 static inline int
855 conditional_insn (void)
856 {
857 return inst.cond != COND_ALWAYS;
858 }
859
/* Forward declarations for the IT-block state machine, defined later
   in this file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record the IT classification of the current instruction and run the
   IT state machine; returns from the enclosing (void) function on
   failure.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for functions returning a value: returns
   FAILRET on failure.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Classify the current instruction as (potentially) the last one of an
   IT block; an unconditional instruction may also sit outside any
   block.  */
#define set_it_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
895
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: deliberately skips at most ONE space.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
930
931 static inline int
932 skip_past_char (char ** str, char c)
933 {
934 /* PR gas/14987: Allow for whitespace before the expected character. */
935 skip_whitespace (*str);
936
937 if (**str == c)
938 {
939 (*str)++;
940 return SUCCESS;
941 }
942 else
943 return FAIL;
944 }
945
/* Consume an optional comma (and one leading blank); see skip_past_char.  */
#define skip_past_comma(str) skip_past_char (str, ',')
947
948 /* Arithmetic expressions (possibly involving symbols). */
949
950 /* Return TRUE if anything in the expression is a bignum. */
951
952 static int
953 walk_no_bignums (symbolS * sp)
954 {
955 if (symbol_get_value_expression (sp)->X_op == O_big)
956 return 1;
957
958 if (symbol_get_value_expression (sp)->X_add_symbol)
959 {
960 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
961 || (symbol_get_value_expression (sp)->X_op_symbol
962 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
963 }
964
965 return 0;
966 }
967
/* Non-zero while my_get_expression is running; md_operand checks it so
   that parse failures are attributed to the faulty instruction.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
/* GE_NO_PREFIX: no '#'/'$' prefix permitted.
   GE_IMM_PREFIX: '#'/'$' prefix required.
   GE_OPT_PREFIX: '#'/'$' prefix optional.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
977
/* Parse an arithmetic expression at *STR into EP, obeying PREFIX_MODE
   (one of the GE_* values above).  Returns 0 on success with *STR
   advanced past the expression, or non-zero on failure with inst.error
   set.  Bignums are rejected here unless GE_OPT_PREFIX_BIG was given.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over our text; the
     in_my_get_expression flag makes md_operand mark anything the
     generic code cannot handle as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1063
1064 /* Turn a string in input_line_pointer into a floating point constant
1065 of type TYPE, and store the appropriate bytes in *LITP. The number
1066 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1067 returned, or NULL on OK.
1068
1069 Note that fp constants aren't represent in the normal way on the ARM.
1070 In big endian mode, things are as expected. However, in little endian
1071 mode fp constants are big-endian word-wise, and little-endian byte-wise
1072 within the words. For example, (double) 1.1 in big endian mode is
1073 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1074 the byte sequence 99 99 f1 3f 9a 99 99 99.
1075
1076 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1077
1078 char *
1079 md_atof (int type, char * litP, int * sizeP)
1080 {
1081 int prec;
1082 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1083 char *t;
1084 int i;
1085
1086 switch (type)
1087 {
1088 case 'f':
1089 case 'F':
1090 case 's':
1091 case 'S':
1092 prec = 2;
1093 break;
1094
1095 case 'd':
1096 case 'D':
1097 case 'r':
1098 case 'R':
1099 prec = 4;
1100 break;
1101
1102 case 'x':
1103 case 'X':
1104 prec = 5;
1105 break;
1106
1107 case 'p':
1108 case 'P':
1109 prec = 5;
1110 break;
1111
1112 default:
1113 *sizeP = 0;
1114 return _("Unrecognized or unsupported floating point constant");
1115 }
1116
1117 t = atof_ieee (input_line_pointer, type, words);
1118 if (t)
1119 input_line_pointer = t;
1120 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1121
1122 if (target_big_endian)
1123 {
1124 for (i = 0; i < prec; i++)
1125 {
1126 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1127 litP += sizeof (LITTLENUM_TYPE);
1128 }
1129 }
1130 else
1131 {
1132 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1133 for (i = prec - 1; i >= 0; i--)
1134 {
1135 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1136 litP += sizeof (LITTLENUM_TYPE);
1137 }
1138 else
1139 /* For a 4 byte float the order of elements in `words' is 1 0.
1140 For an 8 byte float the order is 1 0 3 2. */
1141 for (i = 0; i < prec; i += 2)
1142 {
1143 md_number_to_chars (litP, (valueT) words[i + 1],
1144 sizeof (LITTLENUM_TYPE));
1145 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1146 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1147 litP += 2 * sizeof (LITTLENUM_TYPE);
1148 }
1149 }
1150
1151 return NULL;
1152 }
1153
1154 /* We handle all bad expressions here, so that we can report the faulty
1155 instruction in the error message. */
1156 void
1157 md_operand (expressionS * exp)
1158 {
1159 if (in_my_get_expression)
1160 exp->X_op = O_illegal;
1161 }
1162
1163 /* Immediate values. */
1164
1165 /* Generic immediate-value read function for use in directives.
1166 Accepts anything that 'expression' can fold to a constant.
1167 *val receives the number. */
1168 #ifdef OBJ_ELF
1169 static int
1170 immediate_for_directive (int *val)
1171 {
1172 expressionS exp;
1173 exp.X_op = O_illegal;
1174
1175 if (is_immediate_prefix (*input_line_pointer))
1176 {
1177 input_line_pointer++;
1178 expression (&exp);
1179 }
1180
1181 if (exp.X_op != O_constant)
1182 {
1183 as_bad (_("expected #constant"));
1184 ignore_rest_of_line ();
1185 return FAIL;
1186 }
1187 *val = exp.X_add_number;
1188 return SUCCESS;
1189 }
1190 #endif
1191
1192 /* Register parsing. */
1193
1194 /* Generic register parser. CCP points to what should be the
1195 beginning of a register name. If it is indeed a valid register
1196 name, advance CCP over it and return the reg_entry structure;
1197 otherwise return NULL. Does not issue diagnostics. */
1198
1199 static struct reg_entry *
1200 arm_reg_parse_multi (char **ccp)
1201 {
1202 char *start = *ccp;
1203 char *p;
1204 struct reg_entry *reg;
1205
1206 skip_whitespace (start);
1207
1208 #ifdef REGISTER_PREFIX
1209 if (*start != REGISTER_PREFIX)
1210 return NULL;
1211 start++;
1212 #endif
1213 #ifdef OPTIONAL_REGISTER_PREFIX
1214 if (*start == OPTIONAL_REGISTER_PREFIX)
1215 start++;
1216 #endif
1217
1218 p = start;
1219 if (!ISALPHA (*p) || !is_name_beginner (*p))
1220 return NULL;
1221
1222 do
1223 p++;
1224 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1225
1226 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1227
1228 if (!reg)
1229 return NULL;
1230
1231 *ccp = p;
1232 return reg;
1233 }
1234
/* Accept alternative spellings for registers of class TYPE, parsing from
   START.  REG is the entry already found by arm_reg_parse_multi (may be
   NULL).  Returns the register number, or FAIL.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no break here, so a REG_TYPE_CP request
	 also accepts a WCG register via the case below -- confirm this
	 fall-through is intentional rather than a missing break.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1272
1273 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1274 return value is the register number or FAIL. */
1275
1276 static int
1277 arm_reg_parse (char **ccp, enum arm_reg_type type)
1278 {
1279 char *start = *ccp;
1280 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1281 int ret;
1282
1283 /* Do not allow a scalar (reg+index) to parse as a register. */
1284 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1285 return FAIL;
1286
1287 if (reg && reg->type == type)
1288 return reg->number;
1289
1290 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1291 return ret;
1292
1293 *ccp = start;
1294 return FAIL;
1295 }
1296
1297 /* Parse a Neon type specifier. *STR should point at the leading '.'
1298 character. Does no verification at this stage that the type fits the opcode
1299 properly. E.g.,
1300
1301 .i32.i32.s16
1302 .s32.f32
1303 .u16
1304
1305 Can all be legally parsed by this function.
1306
1307 Fills in neon_type struct pointer with parsed information, and updates STR
1308 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1309 type, FAIL if not. */
1310
1311 static int
1312 parse_neon_type (struct neon_type *type, char **str)
1313 {
1314 char *ptr = *str;
1315
1316 if (type)
1317 type->elems = 0;
1318
1319 while (type->elems < NEON_MAX_TYPE_ELS)
1320 {
1321 enum neon_el_type thistype = NT_untyped;
1322 unsigned thissize = -1u;
1323
1324 if (*ptr != '.')
1325 break;
1326
1327 ptr++;
1328
1329 /* Just a size without an explicit type. */
1330 if (ISDIGIT (*ptr))
1331 goto parsesize;
1332
1333 switch (TOLOWER (*ptr))
1334 {
1335 case 'i': thistype = NT_integer; break;
1336 case 'f': thistype = NT_float; break;
1337 case 'p': thistype = NT_poly; break;
1338 case 's': thistype = NT_signed; break;
1339 case 'u': thistype = NT_unsigned; break;
1340 case 'd':
1341 thistype = NT_float;
1342 thissize = 64;
1343 ptr++;
1344 goto done;
1345 default:
1346 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1347 return FAIL;
1348 }
1349
1350 ptr++;
1351
1352 /* .f is an abbreviation for .f32. */
1353 if (thistype == NT_float && !ISDIGIT (*ptr))
1354 thissize = 32;
1355 else
1356 {
1357 parsesize:
1358 thissize = strtoul (ptr, &ptr, 10);
1359
1360 if (thissize != 8 && thissize != 16 && thissize != 32
1361 && thissize != 64)
1362 {
1363 as_bad (_("bad size %d in type specifier"), thissize);
1364 return FAIL;
1365 }
1366 }
1367
1368 done:
1369 if (type)
1370 {
1371 type->el[type->elems].type = thistype;
1372 type->el[type->elems].size = thissize;
1373 type->elems++;
1374 }
1375 }
1376
1377 /* Empty/missing type is not a successful parse. */
1378 if (type->elems == 0)
1379 return FAIL;
1380
1381 *str = ptr;
1382
1383 return SUCCESS;
1384 }
1385
1386 /* Errors may be set multiple times during parsing or bit encoding
1387 (particularly in the Neon bits), but usually the earliest error which is set
1388 will be the most meaningful. Avoid overwriting it with later (cascading)
1389 errors by calling this function. */
1390
1391 static void
1392 first_error (const char *err)
1393 {
1394 if (!inst.error)
1395 inst.error = err;
1396 }
1397
1398 /* Parse a single type, e.g. ".s32", leading period included. */
1399 static int
1400 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1401 {
1402 char *str = *ccp;
1403 struct neon_type optype;
1404
1405 if (*str == '.')
1406 {
1407 if (parse_neon_type (&optype, &str) == SUCCESS)
1408 {
1409 if (optype.elems == 1)
1410 *vectype = optype.el[0];
1411 else
1412 {
1413 first_error (_("only one type should be specified for operand"));
1414 return FAIL;
1415 }
1416 }
1417 else
1418 {
1419 first_error (_("vector type expected"));
1420 return FAIL;
1421 }
1422 }
1423 else
1424 return FAIL;
1425
1426 *ccp = str;
1427
1428 return SUCCESS;
1429 }
1430
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]" -- operate on every lane.  */
#define NEON_INTERLEAVE_LANES	14	/* No index -- structure interleave.  */
1436
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL (with *CCP
   untouched) when the text does not match.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type and no index attached.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A .dn/.qn alias may already carry type and/or index information.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix must not conflict with type information
     already attached to the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" selects all lanes, "[n]" a single lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1545
1546 /* Like arm_reg_parse, but allow allow the following extra features:
1547 - If RTYPE is non-zero, return the (possibly restricted) type of the
1548 register (e.g. Neon double or quad reg when either has been requested).
1549 - If this is a Neon vector type with additional type information, fill
1550 in the struct pointed to by VECTYPE (if non-NULL).
1551 This function will fault on encountering a scalar. */
1552
1553 static int
1554 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1555 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1556 {
1557 struct neon_typed_alias atype;
1558 char *str = *ccp;
1559 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1560
1561 if (reg == FAIL)
1562 return FAIL;
1563
1564 /* Do not allow regname(... to parse as a register. */
1565 if (*str == '(')
1566 return FAIL;
1567
1568 /* Do not allow a scalar (reg+index) to parse as a register. */
1569 if ((atype.defined & NTA_HASINDEX) != 0)
1570 {
1571 first_error (_("register operand expected, but got scalar"));
1572 return FAIL;
1573 }
1574
1575 if (vectype)
1576 *vectype = atype.eltype;
1577
1578 *ccp = str;
1579
1580 return reg;
1581 }
1582
/* A parsed scalar packs the register in the high bits and the lane index
   in the low 4 bits (see parse_scalar).  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1585
1586 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1587 have enough information to be able to do a good job bounds-checking. So, we
1588 just do easy checks here, and do further checks later. */
1589
1590 static int
1591 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1592 {
1593 int reg;
1594 char *str = *ccp;
1595 struct neon_typed_alias atype;
1596
1597 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1598
1599 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1600 return FAIL;
1601
1602 if (atype.index == NEON_ALL_LANES)
1603 {
1604 first_error (_("scalar must have an index"));
1605 return FAIL;
1606 }
1607 else if (atype.index >= 64 / elsize)
1608 {
1609 first_error (_("scalar index out of range"));
1610 return FAIL;
1611 }
1612
1613 if (type)
1614 *type = atype.eltype;
1615
1616 *ccp = str;
1617
1618 return reg * 16 + atype.index;
1619 }
1620
1621 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1622
1623 static long
1624 parse_reg_list (char ** strp)
1625 {
1626 char * str = * strp;
1627 long range = 0;
1628 int another_range;
1629
1630 /* We come back here if we get ranges concatenated by '+' or '|'. */
1631 do
1632 {
1633 skip_whitespace (str);
1634
1635 another_range = 0;
1636
1637 if (*str == '{')
1638 {
1639 int in_range = 0;
1640 int cur_reg = -1;
1641
1642 str++;
1643 do
1644 {
1645 int reg;
1646
1647 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1648 {
1649 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1650 return FAIL;
1651 }
1652
1653 if (in_range)
1654 {
1655 int i;
1656
1657 if (reg <= cur_reg)
1658 {
1659 first_error (_("bad range in register list"));
1660 return FAIL;
1661 }
1662
1663 for (i = cur_reg + 1; i < reg; i++)
1664 {
1665 if (range & (1 << i))
1666 as_tsktsk
1667 (_("Warning: duplicated register (r%d) in register list"),
1668 i);
1669 else
1670 range |= 1 << i;
1671 }
1672 in_range = 0;
1673 }
1674
1675 if (range & (1 << reg))
1676 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1677 reg);
1678 else if (reg <= cur_reg)
1679 as_tsktsk (_("Warning: register range not in ascending order"));
1680
1681 range |= 1 << reg;
1682 cur_reg = reg;
1683 }
1684 while (skip_past_comma (&str) != FAIL
1685 || (in_range = 1, *str++ == '-'));
1686 str--;
1687
1688 if (skip_past_char (&str, '}') == FAIL)
1689 {
1690 first_error (_("missing `}'"));
1691 return FAIL;
1692 }
1693 }
1694 else
1695 {
1696 expressionS exp;
1697
1698 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1699 return FAIL;
1700
1701 if (exp.X_op == O_constant)
1702 {
1703 if (exp.X_add_number
1704 != (exp.X_add_number & 0x0000ffff))
1705 {
1706 inst.error = _("invalid register mask");
1707 return FAIL;
1708 }
1709
1710 if ((range & exp.X_add_number) != 0)
1711 {
1712 int regno = range & exp.X_add_number;
1713
1714 regno &= -regno;
1715 regno = (1 << regno) - 1;
1716 as_tsktsk
1717 (_("Warning: duplicated register (r%d) in register list"),
1718 regno);
1719 }
1720
1721 range |= exp.X_add_number;
1722 }
1723 else
1724 {
1725 if (inst.reloc.type != 0)
1726 {
1727 inst.error = _("expression too complex");
1728 return FAIL;
1729 }
1730
1731 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1732 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1733 inst.reloc.pc_rel = 0;
1734 }
1735 }
1736
1737 if (*str == '|' || *str == '+')
1738 {
1739 str++;
1740 another_range = 1;
1741 }
1742 }
1743 while (another_range);
1744
1745 *strp = str;
1746 return range;
1747 }
1748
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* Neon D registers (Q syntax accepted).  */
};
1757
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;	/* Bit N set <=> register N in the list.  */
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above any valid register; lowered as registers are seen.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> covers D<2n> and D<2n+1>; include the second half.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this steps over the terminating '}' without checking
     that it is present -- confirm malformed input cannot reach here
     with the brace missing.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1936
1937 /* True if two alias types are the same. */
1938
1939 static bfd_boolean
1940 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1941 {
1942 if (!a && !b)
1943 return TRUE;
1944
1945 if (!a || !b)
1946 return FALSE;
1947
1948 if (a->defined != b->defined)
1949 return FALSE;
1950
1951 if ((a->defined & NTA_HASTYPE) != 0
1952 && (a->eltype.type != b->eltype.type
1953 || a->eltype.size != b->eltype.size))
1954 return FALSE;
1955
1956 if ((a->defined & NTA_HASINDEX) != 0
1957 && (a->index != b->index))
1958 return FALSE;
1959
1960 return TRUE;
1961 }
1962
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the packed return value described above.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
1974
/* Worker for the VLD<n>/VST<n> list syntax described above: accepts
   {d0,d1}, {d0-d3}, {q0}, {d0[2],d1[2]}, etc., with optional braces for
   a singleton.  Returns the packed lane/stride/length value, or FAIL.  */

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* -1 until a second register fixes the stride.  */
  int count = 0;	/* Number of D registers seen so far.  */
  int lane = -1;	/* -1 until [] / [n] / plain form decides it.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: it determines the expected type for the
	     rest of the list (firsttype is always set here, since this
	     branch is taken on the first iteration).  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All entries must select the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2127
2128 /* Parse an explicit relocation suffix on an expression. This is
2129 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2130 arm_reloc_hsh contains no entries, so this function can only
2131 succeed if there is no () after the word. Returns -1 on error,
2132 BFD_RELOC_UNUSED if there wasn't any suffix. */
2133
2134 static int
2135 parse_reloc (char **str)
2136 {
2137 struct reloc_entry *r;
2138 char *p, *q;
2139
2140 if (**str != '(')
2141 return BFD_RELOC_UNUSED;
2142
2143 p = *str + 1;
2144 q = p;
2145
2146 while (*q && *q != ')' && *q != ',')
2147 q++;
2148 if (*q != ')')
2149 return -1;
2150
2151 if ((r = (struct reloc_entry *)
2152 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2153 return -1;
2154
2155 *str = q + 1;
2156 return r->reloc;
2157 }
2158
2159 /* Directives: register aliases. */
2160
2161 static struct reg_entry *
2162 insert_reg_alias (char *str, unsigned number, int type)
2163 {
2164 struct reg_entry *new_reg;
2165 const char *name;
2166
2167 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2168 {
2169 if (new_reg->builtin)
2170 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2171
2172 /* Only warn about a redefinition if it's not defined as the
2173 same register. */
2174 else if (new_reg->number != number || new_reg->type != type)
2175 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2176
2177 return NULL;
2178 }
2179
2180 name = xstrdup (str);
2181 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2182
2183 new_reg->name = name;
2184 new_reg->number = number;
2185 new_reg->type = type;
2186 new_reg->builtin = FALSE;
2187 new_reg->neon = NULL;
2188
2189 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2190 abort ();
2191
2192 return new_reg;
2193 }
2194
2195 static void
2196 insert_neon_reg_alias (char *str, int number, int type,
2197 struct neon_typed_alias *atype)
2198 {
2199 struct reg_entry *reg = insert_reg_alias (str, number, type);
2200
2201 if (!reg)
2202 {
2203 first_error (_("attempt to redefine typed alias"));
2204 return;
2205 }
2206
2207 if (atype)
2208 {
2209 reg->neon = (struct neon_typed_alias *)
2210 xmalloc (sizeof (struct neon_typed_alias));
2211 *reg->neon = *atype;
2212 }
2213 }
2214
2215 /* Look for the .req directive. This is of the form:
2216
2217 new_register_name .req existing_register_name
2218
2219 If we find one, or if it looks sufficiently like one that we want to
2220 handle any error here, return TRUE. Otherwise return FALSE. */
2221
2222 static bfd_boolean
2223 create_register_alias (char * newname, char *p)
2224 {
2225 struct reg_entry *old;
2226 char *oldname, *nbuf;
2227 size_t nlen;
2228
2229 /* The input scrubber ensures that whitespace after the mnemonic is
2230 collapsed to single spaces. */
2231 oldname = p;
2232 if (strncmp (oldname, " .req ", 6) != 0)
2233 return FALSE;
2234
2235 oldname += 6;
2236 if (*oldname == '\0')
2237 return FALSE;
2238
2239 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2240 if (!old)
2241 {
2242 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2243 return TRUE;
2244 }
2245
2246 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2247 the desired alias name, and p points to its end. If not, then
2248 the desired alias name is in the global original_case_string. */
2249 #ifdef TC_CASE_SENSITIVE
2250 nlen = p - newname;
2251 #else
2252 newname = original_case_string;
2253 nlen = strlen (newname);
2254 #endif
2255
2256 nbuf = (char *) alloca (nlen + 1);
2257 memcpy (nbuf, newname, nlen);
2258 nbuf[nlen] = '\0';
2259
2260 /* Create aliases under the new name as stated; an all-lowercase
2261 version of the new name; and an all-uppercase version of the new
2262 name. */
2263 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2264 {
2265 for (p = nbuf; *p; p++)
2266 *p = TOUPPER (*p);
2267
2268 if (strncmp (nbuf, newname, nlen))
2269 {
2270 /* If this attempt to create an additional alias fails, do not bother
2271 trying to create the all-lower case alias. We will fail and issue
2272 a second, duplicate error message. This situation arises when the
2273 programmer does something like:
2274 foo .req r0
2275 Foo .req r1
2276 The second .req creates the "Foo" alias but then fails to create
2277 the artificial FOO alias because it has already been created by the
2278 first .req. */
2279 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2280 return TRUE;
2281 }
2282
2283 for (p = nbuf; *p; p++)
2284 *p = TOLOWER (*p);
2285
2286 if (strncmp (nbuf, newname, nlen))
2287 insert_reg_alias (nbuf, old->number, old->type);
2288 }
2289
2290 return TRUE;
2291 }
2292
2293 /* Create a Neon typed/indexed register alias using directives, e.g.:
2294 X .dn d5.s32[1]
2295 Y .qn 6.s16
2296 Z .dn d7
2297 T .dn Z[0]
2298 These typed registers can be used instead of the types specified after the
2299 Neon mnemonic, so long as all operands given have types. Types can also be
2300 specified directly, e.g.:
2301 vadd d0.s32, d1.s32, d2.s32 */
2302
2303 static bfd_boolean
2304 create_neon_reg_alias (char *newname, char *p)
2305 {
2306 enum arm_reg_type basetype;
2307 struct reg_entry *basereg;
2308 struct reg_entry mybasereg;
2309 struct neon_type ntype;
2310 struct neon_typed_alias typeinfo;
2311 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2312 int namelen;
2313
2314 typeinfo.defined = 0;
2315 typeinfo.eltype.type = NT_invtype;
2316 typeinfo.eltype.size = -1;
2317 typeinfo.index = -1;
2318
2319 nameend = p;
2320
2321 if (strncmp (p, " .dn ", 5) == 0)
2322 basetype = REG_TYPE_VFD;
2323 else if (strncmp (p, " .qn ", 5) == 0)
2324 basetype = REG_TYPE_NQ;
2325 else
2326 return FALSE;
2327
2328 p += 5;
2329
2330 if (*p == '\0')
2331 return FALSE;
2332
2333 basereg = arm_reg_parse_multi (&p);
2334
2335 if (basereg && basereg->type != basetype)
2336 {
2337 as_bad (_("bad type for register"));
2338 return FALSE;
2339 }
2340
2341 if (basereg == NULL)
2342 {
2343 expressionS exp;
2344 /* Try parsing as an integer. */
2345 my_get_expression (&exp, &p, GE_NO_PREFIX);
2346 if (exp.X_op != O_constant)
2347 {
2348 as_bad (_("expression must be constant"));
2349 return FALSE;
2350 }
2351 basereg = &mybasereg;
2352 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2353 : exp.X_add_number;
2354 basereg->neon = 0;
2355 }
2356
2357 if (basereg->neon)
2358 typeinfo = *basereg->neon;
2359
2360 if (parse_neon_type (&ntype, &p) == SUCCESS)
2361 {
2362 /* We got a type. */
2363 if (typeinfo.defined & NTA_HASTYPE)
2364 {
2365 as_bad (_("can't redefine the type of a register alias"));
2366 return FALSE;
2367 }
2368
2369 typeinfo.defined |= NTA_HASTYPE;
2370 if (ntype.elems != 1)
2371 {
2372 as_bad (_("you must specify a single type only"));
2373 return FALSE;
2374 }
2375 typeinfo.eltype = ntype.el[0];
2376 }
2377
2378 if (skip_past_char (&p, '[') == SUCCESS)
2379 {
2380 expressionS exp;
2381 /* We got a scalar index. */
2382
2383 if (typeinfo.defined & NTA_HASINDEX)
2384 {
2385 as_bad (_("can't redefine the index of a scalar alias"));
2386 return FALSE;
2387 }
2388
2389 my_get_expression (&exp, &p, GE_NO_PREFIX);
2390
2391 if (exp.X_op != O_constant)
2392 {
2393 as_bad (_("scalar index must be constant"));
2394 return FALSE;
2395 }
2396
2397 typeinfo.defined |= NTA_HASINDEX;
2398 typeinfo.index = exp.X_add_number;
2399
2400 if (skip_past_char (&p, ']') == FAIL)
2401 {
2402 as_bad (_("expecting ]"));
2403 return FALSE;
2404 }
2405 }
2406
2407 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2408 the desired alias name, and p points to its end. If not, then
2409 the desired alias name is in the global original_case_string. */
2410 #ifdef TC_CASE_SENSITIVE
2411 namelen = nameend - newname;
2412 #else
2413 newname = original_case_string;
2414 namelen = strlen (newname);
2415 #endif
2416
2417 namebuf = (char *) alloca (namelen + 1);
2418 strncpy (namebuf, newname, namelen);
2419 namebuf[namelen] = '\0';
2420
2421 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2422 typeinfo.defined != 0 ? &typeinfo : NULL);
2423
2424 /* Insert name in all uppercase. */
2425 for (p = namebuf; *p; p++)
2426 *p = TOUPPER (*p);
2427
2428 if (strncmp (namebuf, newname, namelen))
2429 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2430 typeinfo.defined != 0 ? &typeinfo : NULL);
2431
2432 /* Insert name in all lowercase. */
2433 for (p = namebuf; *p; p++)
2434 *p = TOLOWER (*p);
2435
2436 if (strncmp (namebuf, newname, namelen))
2437 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2438 typeinfo.defined != 0 ? &typeinfo : NULL);
2439
2440 return TRUE;
2441 }
2442
2443 /* Should never be called, as .req goes between the alias and the
2444 register name, not at the beginning of the line. */
2445
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reached only when ".req" appears in the directive position; the
     valid form "alias .req reg" is handled by create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
2451
/* As for s_req: ".dn" in the directive position is always an error;
   the valid form "alias .dn reg[.type][index]" is handled by
   create_neon_reg_alias.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2457
/* As for s_req: ".qn" in the directive position is always an error;
   the valid form "alias .qn reg[.type][index]" is handled by
   create_neon_reg_alias.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2463
2464 /* The .unreq directive deletes an alias which was previously defined
2465 by .req. For example:
2466
2467 my_alias .req r11
2468 .unreq my_alias */
2469
2470 static void
2471 s_unreq (int a ATTRIBUTE_UNUSED)
2472 {
2473 char * name;
2474 char saved_char;
2475
2476 name = input_line_pointer;
2477
2478 while (*input_line_pointer != 0
2479 && *input_line_pointer != ' '
2480 && *input_line_pointer != '\n')
2481 ++input_line_pointer;
2482
2483 saved_char = *input_line_pointer;
2484 *input_line_pointer = 0;
2485
2486 if (!*name)
2487 as_bad (_("invalid syntax for .unreq directive"));
2488 else
2489 {
2490 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2491 name);
2492
2493 if (!reg)
2494 as_bad (_("unknown register alias '%s'"), name);
2495 else if (reg->builtin)
2496 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2497 name);
2498 else
2499 {
2500 char * p;
2501 char * nbuf;
2502
2503 hash_delete (arm_reg_hsh, name, FALSE);
2504 free ((char *) reg->name);
2505 if (reg->neon)
2506 free (reg->neon);
2507 free (reg);
2508
2509 /* Also locate the all upper case and all lower case versions.
2510 Do not complain if we cannot find one or the other as it
2511 was probably deleted above. */
2512
2513 nbuf = strdup (name);
2514 for (p = nbuf; *p; p++)
2515 *p = TOUPPER (*p);
2516 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2517 if (reg)
2518 {
2519 hash_delete (arm_reg_hsh, nbuf, FALSE);
2520 free ((char *) reg->name);
2521 if (reg->neon)
2522 free (reg->neon);
2523 free (reg);
2524 }
2525
2526 for (p = nbuf; *p; p++)
2527 *p = TOLOWER (*p);
2528 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2529 if (reg)
2530 {
2531 hash_delete (arm_reg_hsh, nbuf, FALSE);
2532 free ((char *) reg->name);
2533 if (reg->neon)
2534 free (reg->neon);
2535 free (reg);
2536 }
2537
2538 free (nbuf);
2539 }
2540 }
2541
2542 *input_line_pointer = saved_char;
2543 demand_empty_rest_of_line ();
2544 }
2545
2546 /* Directives: Instruction set selection. */
2547
2548 #ifdef OBJ_ELF
2549 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2550 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
2553
2554 /* Create a new mapping symbol for the transition to STATE. */
2555
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for the new state:
     $d = data, $a = ARM code, $t = Thumb code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb-ness and interworking status matching the
     state this symbol introduces.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous symbol: the new one supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2629
2630 /* We must sometimes convert a region marked as code to data during
2631 code alignment, if an odd number of bytes have to be padded. The
2632 code mapping symbol is pushed to an aligned address. */
2633
2634 static void
2635 insert_data_mapping_symbol (enum mstate state,
2636 valueT value, fragS *frag, offsetT bytes)
2637 {
2638 /* If there was already a mapping symbol, remove it. */
2639 if (frag->tc_frag_data.last_map != NULL
2640 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2641 {
2642 symbolS *symp = frag->tc_frag_data.last_map;
2643
2644 if (value == 0)
2645 {
2646 know (frag->tc_frag_data.first_map == symp);
2647 frag->tc_frag_data.first_map = NULL;
2648 }
2649 frag->tc_frag_data.last_map = NULL;
2650 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2651 }
2652
2653 make_mapping_symbol (MAP_DATA, value, frag);
2654 make_mapping_symbol (state, value + bytes, frag);
2655 }
2656
2657 static void mapping_state_2 (enum mstate state, int max_chars);
2658
2659 /* Set the mapping state to STATE. Only call this when about to
2660 emit some STATE bytes to the file. */
2661
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2695
2696 /* Same as mapping_state, but MAX_CHARS bytes have already been
2697 allocated. Put the mapping symbol that far back. */
2698
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only tracked in normal (BFD-backed) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* First code in the section: if anything (i.e. data) was emitted
     before it, retroactively mark the start of the section as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS bytes back, i.e. at the start of the
     bytes the caller has already allocated.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2725 #undef TRANSITION
2726 #else
2727 #define mapping_state(x) ((void)0)
2728 #define mapping_state_2(x, y) ((void)0)
2729 #endif
2730
2731 /* Find the real, Thumb encoded start of a Thumb function. */
2732
2733 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up ".real_start_of<name>"; fall back to the original symbol
     if no such stub symbol exists.  */
  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2766 #endif
2767
2768 static void
2769 opcode_select (int width)
2770 {
2771 switch (width)
2772 {
2773 case 16:
2774 if (! thumb_mode)
2775 {
2776 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2777 as_bad (_("selected processor does not support THUMB opcodes"));
2778
2779 thumb_mode = 1;
2780 /* No need to force the alignment, since we will have been
2781 coming from ARM mode, which is word-aligned. */
2782 record_alignment (now_seg, 1);
2783 }
2784 break;
2785
2786 case 32:
2787 if (thumb_mode)
2788 {
2789 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2790 as_bad (_("selected processor does not support ARM opcodes"));
2791
2792 thumb_mode = 0;
2793
2794 if (!need_pass_2)
2795 frag_align (2, 0, 0);
2796
2797 record_alignment (now_seg, 1);
2798 }
2799 break;
2800
2801 default:
2802 as_bad (_("invalid instruction size selected (%d)"), width);
2803 }
2804 }
2805
/* Directive handler: switch to 32-bit ARM instruction encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2812
/* Directive handler: switch to 16-bit Thumb instruction encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2819
2820 static void
2821 s_code (int unused ATTRIBUTE_UNUSED)
2822 {
2823 int temp;
2824
2825 temp = get_absolute_expression ();
2826 switch (temp)
2827 {
2828 case 16:
2829 case 32:
2830 opcode_select (temp);
2831 break;
2832
2833 default:
2834 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2835 }
2836 }
2837
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 rather than 1 marks the mode as forced (see comment above).  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2854
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to Thumb encoding, exactly as ".thumb" would.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2864
2865 /* Perform a .set directive, but also mark the alias as
2866 being a thumb function. */
2867
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* NUL-terminate the name only for the duration of the error
	 message, then put the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter that temporarily NUL-terminated the name.  */
  * end_name = delim;

  /* EQUIV non-zero means behave like ".equiv": redefinition is an
     error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2953
2954 /* Directives: Mode selection. */
2955
2956 /* .syntax [unified|divided] - choose the new unified syntax
2957 (same for Arm and Thumb encoding, modulo slight differences in what
2958 can be represented) or the old divergent syntax for each mode. */
2959 static void
2960 s_syntax (int unused ATTRIBUTE_UNUSED)
2961 {
2962 char *name, delim;
2963
2964 delim = get_symbol_name (& name);
2965
2966 if (!strcasecmp (name, "unified"))
2967 unified_syntax = TRUE;
2968 else if (!strcasecmp (name, "divided"))
2969 unified_syntax = FALSE;
2970 else
2971 {
2972 as_bad (_("unrecognized syntax mode \"%s\""), name);
2973 return;
2974 }
2975 (void) restore_line_pointer (delim);
2976 demand_empty_rest_of_line ();
2977 }
2978
2979 /* Directives: sectioning and alignment. */
2980
/* Handle ".bss": switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

  /* Give the ELF backend a chance to react to the section change.  */
#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2993
/* Handle ".even": align the output position to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record 2^1 alignment for the section.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3005
3006 /* Directives: CodeComposer Studio. */
3007
3008 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* Accepted (and ignored) when -mccs is in effect; an error otherwise.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3017
/* If name is not NULL, then it is used for marking the beginning of a
   function, whereas if it is NULL then it means the function end.  */
3020 static void
3021 asmfunc_debug (const char * name)
3022 {
3023 static const char * last_name = NULL;
3024
3025 if (name != NULL)
3026 {
3027 gas_assert (last_name == NULL);
3028 last_name = name;
3029
3030 if (debug_type == DEBUG_STABS)
3031 stabs_generate_asm_func (name, name);
3032 }
3033 else
3034 {
3035 gas_assert (last_name != NULL);
3036
3037 if (debug_type == DEBUG_STABS)
3038 stabs_generate_asm_endfunc (last_name, last_name);
3039
3040 last_name = NULL;
3041 }
3042 }
3043
3044 static void
3045 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3046 {
3047 if (codecomposer_syntax)
3048 {
3049 switch (asmfunc_state)
3050 {
3051 case OUTSIDE_ASMFUNC:
3052 asmfunc_state = WAITING_ASMFUNC_NAME;
3053 break;
3054
3055 case WAITING_ASMFUNC_NAME:
3056 as_bad (_(".asmfunc repeated."));
3057 break;
3058
3059 case WAITING_ENDASMFUNC:
3060 as_bad (_(".asmfunc without function."));
3061 break;
3062 }
3063 demand_empty_rest_of_line ();
3064 }
3065 else
3066 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3067 }
3068
3069 static void
3070 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3071 {
3072 if (codecomposer_syntax)
3073 {
3074 switch (asmfunc_state)
3075 {
3076 case OUTSIDE_ASMFUNC:
3077 as_bad (_(".endasmfunc without a .asmfunc."));
3078 break;
3079
3080 case WAITING_ASMFUNC_NAME:
3081 as_bad (_(".endasmfunc without function."));
3082 break;
3083
3084 case WAITING_ENDASMFUNC:
3085 asmfunc_state = OUTSIDE_ASMFUNC;
3086 asmfunc_debug (NULL);
3087 break;
3088 }
3089 demand_empty_rest_of_line ();
3090 }
3091 else
3092 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3093 }
3094
3095 static void
3096 s_ccs_def (int name)
3097 {
3098 if (codecomposer_syntax)
3099 s_globl (name);
3100 else
3101 as_bad (_(".def pseudo-op only available with -mccs flag."));
3102 }
3103
3104 /* Directives: Literal pools. */
3105
3106 static literal_pool *
3107 find_literal_pool (void)
3108 {
3109 literal_pool * pool;
3110
3111 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3112 {
3113 if (pool->section == now_seg
3114 && pool->sub_section == now_subseg)
3115 break;
3116 }
3117
3118 return pool;
3119 }
3120
3121 static literal_pool *
3122 find_or_make_literal_pool (void)
3123 {
3124 /* Next literal pool ID number. */
3125 static unsigned int latest_pool_num = 1;
3126 literal_pool * pool;
3127
3128 pool = find_literal_pool ();
3129
3130 if (pool == NULL)
3131 {
3132 /* Create a new pool. */
3133 pool = (literal_pool *) xmalloc (sizeof (* pool));
3134 if (! pool)
3135 return NULL;
3136
3137 pool->next_free_entry = 0;
3138 pool->section = now_seg;
3139 pool->sub_section = now_subseg;
3140 pool->next = list_of_pools;
3141 pool->symbol = NULL;
3142 pool->alignment = 2;
3143
3144 /* Add it to the list. */
3145 list_of_pools = pool;
3146 }
3147
3148 /* New pools, and emptied pools, will have a NULL symbol. */
3149 if (pool->symbol == NULL)
3150 {
3151 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3152 (valueT) 0, &zero_address_frag);
3153 pool->id = latest_pool_num ++;
3154 }
3155
3156 /* Done. */
3157 return pool;
3158 }
3159
3160 /* Add the literal in the global 'inst'
3161 structure to the relevant literal pool. */
3162
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is handled as two 32-bit halves, ordered to
     match the target endianness.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	     : inst.reloc.exp.X_unsigned ? 0
	     : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Reuse an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry with the same symbol,
	     operator symbol and addend.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 located at an 8-byte aligned offset within the pool.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal may take over a padding slot inserted earlier
	 for 8-byte alignment (flagged in the upper bits of X_md).  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Emit a 4-byte padding slot to reach 8-byte alignment.  */
	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Claim the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to reference the pool symbol
     at the byte offset of the entry just found or created.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3323
3324 bfd_boolean
3325 tc_start_label_without_colon (void)
3326 {
3327 bfd_boolean ret = TRUE;
3328
3329 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3330 {
3331 const char *label = input_line_pointer;
3332
3333 while (!is_end_of_line[(int) label[-1]])
3334 --label;
3335
3336 if (*label == '.')
3337 {
3338 as_bad (_("Invalid label '%s'"), label);
3339 ret = FALSE;
3340 }
3341
3342 asmfunc_debug (label);
3343
3344 asmfunc_state = WAITING_ENDASMFUNC;
3345 }
3346
3347 return ret;
3348 }
3349
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
3352
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Run the object-format and target hooks for a newly created symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3400
/* Implement the .ltorg/.pool directives: dump the current literal pool
   (if there is one and it is non-empty) at the present location and
   mark it empty again.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data, so emit a data mapping symbol here.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 control character keeps the generated name out of the
     user's symbol namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Attach the pool's forward-referenced symbol to this location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The low bits of X_md hold the entry's size in bytes.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3452
3453 #ifdef OBJ_ELF
3454 /* Forward declarations for functions below, in the MD interface
3455 section. */
3456 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3457 static valueT create_unwind_entry (int);
3458 static void start_unwind_section (const segT, int);
3459 static void add_unwind_opcode (valueT, int);
3460 static void flush_pending_unwind (void);
3461
3462 /* Directives: Data. */
3463
3464 static void
3465 s_arm_elf_cons (int nbytes)
3466 {
3467 expressionS exp;
3468
3469 #ifdef md_flush_pending_output
3470 md_flush_pending_output ();
3471 #endif
3472
3473 if (is_it_end_of_statement ())
3474 {
3475 demand_empty_rest_of_line ();
3476 return;
3477 }
3478
3479 #ifdef md_cons_align
3480 md_cons_align (nbytes);
3481 #endif
3482
3483 mapping_state (MAP_DATA);
3484 do
3485 {
3486 int reloc;
3487 char *base = input_line_pointer;
3488
3489 expression (& exp);
3490
3491 if (exp.X_op != O_symbol)
3492 emit_expr (&exp, (unsigned int) nbytes);
3493 else
3494 {
3495 char *before_reloc = input_line_pointer;
3496 reloc = parse_reloc (&input_line_pointer);
3497 if (reloc == -1)
3498 {
3499 as_bad (_("unrecognized relocation suffix"));
3500 ignore_rest_of_line ();
3501 return;
3502 }
3503 else if (reloc == BFD_RELOC_UNUSED)
3504 emit_expr (&exp, (unsigned int) nbytes);
3505 else
3506 {
3507 reloc_howto_type *howto = (reloc_howto_type *)
3508 bfd_reloc_type_lookup (stdoutput,
3509 (bfd_reloc_code_real_type) reloc);
3510 int size = bfd_get_reloc_size (howto);
3511
3512 if (reloc == BFD_RELOC_ARM_PLT32)
3513 {
3514 as_bad (_("(plt) is only valid on branch targets"));
3515 reloc = BFD_RELOC_UNUSED;
3516 size = 0;
3517 }
3518
3519 if (size > nbytes)
3520 as_bad (_("%s relocations do not fit in %d bytes"),
3521 howto->name, nbytes);
3522 else
3523 {
3524 /* We've parsed an expression stopping at O_symbol.
3525 But there may be more expression left now that we
3526 have parsed the relocation marker. Parse it again.
3527 XXX Surely there is a cleaner way to do this. */
3528 char *p = input_line_pointer;
3529 int offset;
3530 char *save_buf = (char *) alloca (input_line_pointer - base);
3531 memcpy (save_buf, base, input_line_pointer - base);
3532 memmove (base + (input_line_pointer - before_reloc),
3533 base, before_reloc - base);
3534
3535 input_line_pointer = base + (input_line_pointer-before_reloc);
3536 expression (&exp);
3537 memcpy (base, save_buf, p - base);
3538
3539 offset = nbytes - size;
3540 p = frag_more (nbytes);
3541 memset (p, 0, nbytes);
3542 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3543 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3544 }
3545 }
3546 }
3547 }
3548 while (*input_line_pointer++ == ',');
3549
3550 /* Put terminator back into stream. */
3551 input_line_pointer --;
3552 demand_empty_rest_of_line ();
3553 }
3554
3555 /* Emit an expression containing a 32-bit thumb instruction.
3556 Implementation based on put_thumb32_insn. */
3557
3558 static void
3559 emit_thumb32_expr (expressionS * exp)
3560 {
3561 expressionS exp_high = *exp;
3562
3563 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3564 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3565 exp->X_add_number &= 0xffff;
3566 emit_expr (exp, (unsigned int) THUMB_SIZE);
3567 }
3568
/* Guess the size in bytes of a Thumb instruction from its opcode:
   2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when it cannot
   be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  if (uop < 0xe800u)
    return 2;

  return uop >= 0xe8000000u ? 4 : 0;
}
3581
/* Emit the constant expression *EXP as one instruction for the .inst
   family of directives.  NBYTES is the explicit width (2 or 4), or 0
   when the width must be guessed from the opcode (Thumb only).
   Returns TRUE if an instruction was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width given: deduce it from the opcode pattern.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit encoding must fit in 16 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine in step with this
		 hand-encoded instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* A little-endian 32-bit Thumb insn is two little-endian
		 halfwords, so it cannot be emitted as a single word.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3626
3627 /* Like s_arm_elf_cons but do not use md_cons_align and
3628 set the mapping state to MAP_ARM/MAP_THUMB. */
3629
3630 static void
3631 s_arm_elf_inst (int nbytes)
3632 {
3633 if (is_it_end_of_statement ())
3634 {
3635 demand_empty_rest_of_line ();
3636 return;
3637 }
3638
3639 /* Calling mapping_state () here will not change ARM/THUMB,
3640 but will ensure not to be in DATA state. */
3641
3642 if (thumb_mode)
3643 mapping_state (MAP_THUMB);
3644 else
3645 {
3646 if (nbytes != 0)
3647 {
3648 as_bad (_("width suffixes are invalid in ARM mode"));
3649 ignore_rest_of_line ();
3650 return;
3651 }
3652
3653 nbytes = 4;
3654
3655 mapping_state (MAP_ARM);
3656 }
3657
3658 do
3659 {
3660 expressionS exp;
3661
3662 expression (& exp);
3663
3664 if (! emit_insn (& exp, nbytes))
3665 {
3666 ignore_rest_of_line ();
3667 return;
3668 }
3669 }
3670 while (*input_line_pointer++ == ',');
3671
3672 /* Put terminator back into stream. */
3673 input_line_pointer --;
3674 demand_empty_rest_of_line ();
3675 }
3676
3677 /* Parse a .rel31 directive. */
3678
3679 static void
3680 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3681 {
3682 expressionS exp;
3683 char *p;
3684 valueT highbit;
3685
3686 highbit = 0;
3687 if (*input_line_pointer == '1')
3688 highbit = 0x80000000;
3689 else if (*input_line_pointer != '0')
3690 as_bad (_("expected 0 or 1"));
3691
3692 input_line_pointer++;
3693 if (*input_line_pointer != ',')
3694 as_bad (_("missing comma"));
3695 input_line_pointer++;
3696
3697 #ifdef md_flush_pending_output
3698 md_flush_pending_output ();
3699 #endif
3700
3701 #ifdef md_cons_align
3702 md_cons_align (4);
3703 #endif
3704
3705 mapping_state (MAP_DATA);
3706
3707 expression (&exp);
3708
3709 p = frag_more (4);
3710 md_number_to_chars (p, highbit, 4);
3711 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3712 BFD_RELOC_ARM_PREL31);
3713
3714 demand_empty_rest_of_line ();
3715 }
3716
3717 /* Directives: AEABI stack-unwind tables. */
3718
3719 /* Parse an unwind_fnstart directive. Simply records the current location. */
3720
3721 static void
3722 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3723 {
3724 demand_empty_rest_of_line ();
3725 if (unwind.proc_start)
3726 {
3727 as_bad (_("duplicate .fnstart directive"));
3728 return;
3729 }
3730
3731 /* Mark the start of the function. */
3732 unwind.proc_start = expr_build_dot ();
3733
3734 /* Reset the rest of the unwind info. */
3735 unwind.opcode_count = 0;
3736 unwind.table_entry = NULL;
3737 unwind.personality_routine = NULL;
3738 unwind.personality_index = -1;
3739 unwind.frame_size = 0;
3740 unwind.fp_offset = 0;
3741 unwind.fp_reg = REG_SP;
3742 unwind.fp_used = 0;
3743 unwind.sp_restored = 0;
3744 }
3745
3746
3747 /* Parse a handlerdata directive. Creates the exception handling table entry
3748 for the function. */
3749
3750 static void
3751 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3752 {
3753 demand_empty_rest_of_line ();
3754 if (!unwind.proc_start)
3755 as_bad (MISSING_FNSTART);
3756
3757 if (unwind.table_entry)
3758 as_bad (_("duplicate .handlerdata directive"));
3759
3760 create_unwind_entry (1);
3761 }
3762
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  If .handlerdata already created the table
     entry, VAL stays 0 and unwind.table_entry is referenced instead.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the start of the two-word entry.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix records the reference without patching any
	 bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3832
3833
3834 /* Parse an unwind_cantunwind directive. */
3835
3836 static void
3837 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3838 {
3839 demand_empty_rest_of_line ();
3840 if (!unwind.proc_start)
3841 as_bad (MISSING_FNSTART);
3842
3843 if (unwind.personality_routine || unwind.personality_index != -1)
3844 as_bad (_("personality routine specified for cantunwind frame"));
3845
3846 unwind.personality_index = -2;
3847 }
3848
3849
3850 /* Parse a personalityindex directive. */
3851
3852 static void
3853 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3854 {
3855 expressionS exp;
3856
3857 if (!unwind.proc_start)
3858 as_bad (MISSING_FNSTART);
3859
3860 if (unwind.personality_routine || unwind.personality_index != -1)
3861 as_bad (_("duplicate .personalityindex directive"));
3862
3863 expression (&exp);
3864
3865 if (exp.X_op != O_constant
3866 || exp.X_add_number < 0 || exp.X_add_number > 15)
3867 {
3868 as_bad (_("bad personality routine number"));
3869 ignore_rest_of_line ();
3870 return;
3871 }
3872
3873 unwind.personality_index = exp.X_add_number;
3874
3875 demand_empty_rest_of_line ();
3876 }
3877
3878
/* Parse a personality directive.  Records the named symbol as the
   personality routine for the current function's unwind entry.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Grab the symbol name; get_symbol_name terminates it in place and
     returns the character it overwrote.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the byte that was overwritten by the terminator.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3900
3901
/* Parse a directive saving core registers.  RANGE below is a bit mask:
   bit N set means rN is in the saved list.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip
	 (bit 12) in the mask.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  0xbff0 excludes bit 14
	 (r14), which the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3977
3978
3979 /* Parse a directive saving FPA registers. */
3980
3981 static void
3982 s_arm_unwind_save_fpa (int reg)
3983 {
3984 expressionS exp;
3985 int num_regs;
3986 valueT op;
3987
3988 /* Get Number of registers to transfer. */
3989 if (skip_past_comma (&input_line_pointer) != FAIL)
3990 expression (&exp);
3991 else
3992 exp.X_op = O_illegal;
3993
3994 if (exp.X_op != O_constant)
3995 {
3996 as_bad (_("expected , <constant>"));
3997 ignore_rest_of_line ();
3998 return;
3999 }
4000
4001 num_regs = exp.X_add_number;
4002
4003 if (num_regs < 1 || num_regs > 4)
4004 {
4005 as_bad (_("number of registers must be in the range [1:4]"));
4006 ignore_rest_of_line ();
4007 return;
4008 }
4009
4010 demand_empty_rest_of_line ();
4011
4012 if (reg == 4)
4013 {
4014 /* Short form. */
4015 op = 0xb4 | (num_regs - 1);
4016 add_unwind_opcode (op, 1);
4017 }
4018 else
4019 {
4020 /* Long form. */
4021 op = 0xc800 | (reg << 4) | (num_regs - 1);
4022 add_unwind_opcode (op, 2);
4023 }
4024 unwind.frame_size += num_regs * 12;
4025 }
4026
4027
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.
     A list may straddle d15/d16, in which case it is split into two
     opcodes.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Encoded register numbers are relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4076
4077
4078 /* Parse a directive saving VFP registers for pre-ARMv6. */
4079
4080 static void
4081 s_arm_unwind_save_vfp (void)
4082 {
4083 int count;
4084 unsigned int reg;
4085 valueT op;
4086
4087 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4088 if (count == FAIL)
4089 {
4090 as_bad (_("expected register list"));
4091 ignore_rest_of_line ();
4092 return;
4093 }
4094
4095 demand_empty_rest_of_line ();
4096
4097 if (reg == 8)
4098 {
4099 /* Short form. */
4100 op = 0xb8 | (count - 1);
4101 add_unwind_opcode (op, 1);
4102 }
4103 else
4104 {
4105 /* Long form. */
4106 op = 0xb300 | (reg << 4) | (count - 1);
4107 add_unwind_opcode (op, 2);
4108 }
4109 unwind.frame_size += count * 8 + 4;
4110 }
4111
4112
4113 /* Parse a directive saving iWMMXt data registers. */
4114
4115 static void
4116 s_arm_unwind_save_mmxwr (void)
4117 {
4118 int reg;
4119 int hi_reg;
4120 int i;
4121 unsigned mask = 0;
4122 valueT op;
4123
4124 if (*input_line_pointer == '{')
4125 input_line_pointer++;
4126
4127 do
4128 {
4129 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4130
4131 if (reg == FAIL)
4132 {
4133 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4134 goto error;
4135 }
4136
4137 if (mask >> reg)
4138 as_tsktsk (_("register list not in ascending order"));
4139 mask |= 1 << reg;
4140
4141 if (*input_line_pointer == '-')
4142 {
4143 input_line_pointer++;
4144 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4145 if (hi_reg == FAIL)
4146 {
4147 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4148 goto error;
4149 }
4150 else if (reg >= hi_reg)
4151 {
4152 as_bad (_("bad register range"));
4153 goto error;
4154 }
4155 for (; reg < hi_reg; reg++)
4156 mask |= 1 << reg;
4157 }
4158 }
4159 while (skip_past_comma (&input_line_pointer) != FAIL);
4160
4161 skip_past_char (&input_line_pointer, '}');
4162
4163 demand_empty_rest_of_line ();
4164
4165 /* Generate any deferred opcodes because we're going to be looking at
4166 the list. */
4167 flush_pending_unwind ();
4168
4169 for (i = 0; i < 16; i++)
4170 {
4171 if (mask & (1 << i))
4172 unwind.frame_size += 8;
4173 }
4174
4175 /* Attempt to combine with a previous opcode. We do this because gcc
4176 likes to output separate unwind directives for a single block of
4177 registers. */
4178 if (unwind.opcode_count > 0)
4179 {
4180 i = unwind.opcodes[unwind.opcode_count - 1];
4181 if ((i & 0xf8) == 0xc0)
4182 {
4183 i &= 7;
4184 /* Only merge if the blocks are contiguous. */
4185 if (i < 6)
4186 {
4187 if ((mask & 0xfe00) == (1 << 9))
4188 {
4189 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4190 unwind.opcode_count--;
4191 }
4192 }
4193 else if (i == 6 && unwind.opcode_count >= 2)
4194 {
4195 i = unwind.opcodes[unwind.opcode_count - 2];
4196 reg = i >> 4;
4197 i &= 0xf;
4198
4199 op = 0xffff << (reg - 1);
4200 if (reg > 0
4201 && ((mask & op) == (1u << (reg - 1))))
4202 {
4203 op = (1 << (reg + i + 1)) - 1;
4204 op &= ~((1 << reg) - 1);
4205 mask |= op;
4206 unwind.opcode_count -= 2;
4207 }
4208 }
4209 }
4210 }
4211
4212 hi_reg = 15;
4213 /* We want to generate opcodes in the order the registers have been
4214 saved, ie. descending order. */
4215 for (reg = 15; reg >= -1; reg--)
4216 {
4217 /* Save registers in blocks. */
4218 if (reg < 0
4219 || !(mask & (1 << reg)))
4220 {
4221 /* We found an unsaved reg. Generate opcodes to save the
4222 preceding block. */
4223 if (reg != hi_reg)
4224 {
4225 if (reg == 9)
4226 {
4227 /* Short form. */
4228 op = 0xc0 | (hi_reg - 10);
4229 add_unwind_opcode (op, 1);
4230 }
4231 else
4232 {
4233 /* Long form. */
4234 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4235 add_unwind_opcode (op, 2);
4236 }
4237 }
4238 hi_reg = reg - 1;
4239 }
4240 }
4241
4242 return;
4243 error:
4244 ignore_rest_of_line ();
4245 }
4246
4247 static void
4248 s_arm_unwind_save_mmxwcg (void)
4249 {
4250 int reg;
4251 int hi_reg;
4252 unsigned mask = 0;
4253 valueT op;
4254
4255 if (*input_line_pointer == '{')
4256 input_line_pointer++;
4257
4258 skip_whitespace (input_line_pointer);
4259
4260 do
4261 {
4262 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4263
4264 if (reg == FAIL)
4265 {
4266 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4267 goto error;
4268 }
4269
4270 reg -= 8;
4271 if (mask >> reg)
4272 as_tsktsk (_("register list not in ascending order"));
4273 mask |= 1 << reg;
4274
4275 if (*input_line_pointer == '-')
4276 {
4277 input_line_pointer++;
4278 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4279 if (hi_reg == FAIL)
4280 {
4281 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4282 goto error;
4283 }
4284 else if (reg >= hi_reg)
4285 {
4286 as_bad (_("bad register range"));
4287 goto error;
4288 }
4289 for (; reg < hi_reg; reg++)
4290 mask |= 1 << reg;
4291 }
4292 }
4293 while (skip_past_comma (&input_line_pointer) != FAIL);
4294
4295 skip_past_char (&input_line_pointer, '}');
4296
4297 demand_empty_rest_of_line ();
4298
4299 /* Generate any deferred opcodes because we're going to be looking at
4300 the list. */
4301 flush_pending_unwind ();
4302
4303 for (reg = 0; reg < 16; reg++)
4304 {
4305 if (mask & (1 << reg))
4306 unwind.frame_size += 4;
4307 }
4308 op = 0xc700 | mask;
4309 add_unwind_opcode (op, 2);
4310 return;
4311 error:
4312 ignore_rest_of_line ();
4313 }
4314
4315
4316 /* Parse an unwind_save directive.
4317 If the argument is non-zero, this is a .vsave directive. */
4318
4319 static void
4320 s_arm_unwind_save (int arch_v6)
4321 {
4322 char *peek;
4323 struct reg_entry *reg;
4324 bfd_boolean had_brace = FALSE;
4325
4326 if (!unwind.proc_start)
4327 as_bad (MISSING_FNSTART);
4328
4329 /* Figure out what sort of save we have. */
4330 peek = input_line_pointer;
4331
4332 if (*peek == '{')
4333 {
4334 had_brace = TRUE;
4335 peek++;
4336 }
4337
4338 reg = arm_reg_parse_multi (&peek);
4339
4340 if (!reg)
4341 {
4342 as_bad (_("register expected"));
4343 ignore_rest_of_line ();
4344 return;
4345 }
4346
4347 switch (reg->type)
4348 {
4349 case REG_TYPE_FN:
4350 if (had_brace)
4351 {
4352 as_bad (_("FPA .unwind_save does not take a register list"));
4353 ignore_rest_of_line ();
4354 return;
4355 }
4356 input_line_pointer = peek;
4357 s_arm_unwind_save_fpa (reg->number);
4358 return;
4359
4360 case REG_TYPE_RN:
4361 s_arm_unwind_save_core ();
4362 return;
4363
4364 case REG_TYPE_VFD:
4365 if (arch_v6)
4366 s_arm_unwind_save_vfp_armv6 ();
4367 else
4368 s_arm_unwind_save_vfp ();
4369 return;
4370
4371 case REG_TYPE_MMXWR:
4372 s_arm_unwind_save_mmxwr ();
4373 return;
4374
4375 case REG_TYPE_MMXWCG:
4376 s_arm_unwind_save_mmxwcg ();
4377 return;
4378
4379 default:
4380 as_bad (_(".unwind_save does not support this kind of register"));
4381 ignore_rest_of_line ();
4382 }
4383 }
4384
4385
4386 /* Parse an unwind_movsp directive. */
4387
4388 static void
4389 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4390 {
4391 int reg;
4392 valueT op;
4393 int offset;
4394
4395 if (!unwind.proc_start)
4396 as_bad (MISSING_FNSTART);
4397
4398 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4399 if (reg == FAIL)
4400 {
4401 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4402 ignore_rest_of_line ();
4403 return;
4404 }
4405
4406 /* Optional constant. */
4407 if (skip_past_comma (&input_line_pointer) != FAIL)
4408 {
4409 if (immediate_for_directive (&offset) == FAIL)
4410 return;
4411 }
4412 else
4413 offset = 0;
4414
4415 demand_empty_rest_of_line ();
4416
4417 if (reg == REG_SP || reg == REG_PC)
4418 {
4419 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4420 return;
4421 }
4422
4423 if (unwind.fp_reg != REG_SP)
4424 as_bad (_("unexpected .unwind_movsp directive"));
4425
4426 /* Generate opcode to restore the value. */
4427 op = 0x90 | reg;
4428 add_unwind_opcode (op, 1);
4429
4430 /* Record the information for later. */
4431 unwind.fp_reg = reg;
4432 unwind.fp_offset = unwind.frame_size - offset;
4433 unwind.sp_restored = 1;
4434 }
4435
4436 /* Parse an unwind_pad directive. */
4437
4438 static void
4439 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4440 {
4441 int offset;
4442
4443 if (!unwind.proc_start)
4444 as_bad (MISSING_FNSTART);
4445
4446 if (immediate_for_directive (&offset) == FAIL)
4447 return;
4448
4449 if (offset & 3)
4450 {
4451 as_bad (_("stack increment must be multiple of 4"));
4452 ignore_rest_of_line ();
4453 return;
4454 }
4455
4456 /* Don't generate any opcodes, just record the details for later. */
4457 unwind.frame_size += offset;
4458 unwind.pending_offset += offset;
4459
4460 demand_empty_rest_of_line ();
4461 }
4462
4463 /* Parse an unwind_setfp directive. */
4464
4465 static void
4466 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4467 {
4468 int sp_reg;
4469 int fp_reg;
4470 int offset;
4471
4472 if (!unwind.proc_start)
4473 as_bad (MISSING_FNSTART);
4474
4475 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4476 if (skip_past_comma (&input_line_pointer) == FAIL)
4477 sp_reg = FAIL;
4478 else
4479 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4480
4481 if (fp_reg == FAIL || sp_reg == FAIL)
4482 {
4483 as_bad (_("expected <reg>, <reg>"));
4484 ignore_rest_of_line ();
4485 return;
4486 }
4487
4488 /* Optional constant. */
4489 if (skip_past_comma (&input_line_pointer) != FAIL)
4490 {
4491 if (immediate_for_directive (&offset) == FAIL)
4492 return;
4493 }
4494 else
4495 offset = 0;
4496
4497 demand_empty_rest_of_line ();
4498
4499 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4500 {
4501 as_bad (_("register must be either sp or set by a previous"
4502 "unwind_movsp directive"));
4503 return;
4504 }
4505
4506 /* Don't generate any opcodes, just record the information for later. */
4507 unwind.fp_reg = fp_reg;
4508 unwind.fp_used = 1;
4509 if (sp_reg == REG_SP)
4510 unwind.fp_offset = unwind.frame_size - offset;
4511 else
4512 unwind.fp_offset -= offset;
4513 }
4514
4515 /* Parse an unwind_raw directive. */
4516
4517 static void
4518 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4519 {
4520 expressionS exp;
4521 /* This is an arbitrary limit. */
4522 unsigned char op[16];
4523 int count;
4524
4525 if (!unwind.proc_start)
4526 as_bad (MISSING_FNSTART);
4527
4528 expression (&exp);
4529 if (exp.X_op == O_constant
4530 && skip_past_comma (&input_line_pointer) != FAIL)
4531 {
4532 unwind.frame_size += exp.X_add_number;
4533 expression (&exp);
4534 }
4535 else
4536 exp.X_op = O_illegal;
4537
4538 if (exp.X_op != O_constant)
4539 {
4540 as_bad (_("expected <offset>, <opcode>"));
4541 ignore_rest_of_line ();
4542 return;
4543 }
4544
4545 count = 0;
4546
4547 /* Parse the opcode. */
4548 for (;;)
4549 {
4550 if (count >= 16)
4551 {
4552 as_bad (_("unwind opcode too long"));
4553 ignore_rest_of_line ();
4554 }
4555 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4556 {
4557 as_bad (_("invalid unwind opcode"));
4558 ignore_rest_of_line ();
4559 return;
4560 }
4561 op[count++] = exp.X_add_number;
4562
4563 /* Parse the next byte. */
4564 if (skip_past_comma (&input_line_pointer) == FAIL)
4565 break;
4566
4567 expression (&exp);
4568 }
4569
4570 /* Add the opcode bytes in reverse order. */
4571 while (count--)
4572 add_unwind_opcode (op[count], 1);
4573
4574 demand_empty_rest_of_line ();
4575 }
4576
4577
4578 /* Parse a .eabi_attribute directive. */
4579
4580 static void
4581 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4582 {
4583 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4584
4585 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4586 attributes_set_explicitly[tag] = 1;
4587 }
4588
4589 /* Emit a tls fix for the symbol. */
4590
4591 static void
4592 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4593 {
4594 char *p;
4595 expressionS exp;
4596 #ifdef md_flush_pending_output
4597 md_flush_pending_output ();
4598 #endif
4599
4600 #ifdef md_cons_align
4601 md_cons_align (4);
4602 #endif
4603
4604 /* Since we're just labelling the code, there's no need to define a
4605 mapping symbol. */
4606 expression (&exp);
4607 p = obstack_next_free (&frchain_now->frch_obstack);
4608 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4609 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4610 : BFD_RELOC_ARM_TLS_DESCSEQ);
4611 }
4612 #endif /* OBJ_ELF */
4613
4614 static void s_arm_arch (int);
4615 static void s_arm_object_arch (int);
4616 static void s_arm_cpu (int);
4617 static void s_arm_fpu (int);
4618 static void s_arm_arch_extension (int);
4619
4620 #ifdef TE_PE
4621
4622 static void
4623 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4624 {
4625 expressionS exp;
4626
4627 do
4628 {
4629 expression (&exp);
4630 if (exp.X_op == O_symbol)
4631 exp.X_op = O_secrel;
4632
4633 emit_expr (&exp, 4);
4634 }
4635 while (*input_line_pointer++ == ',');
4636
4637 input_line_pointer--;
4638 demand_empty_rest_of_line ();
4639 }
4640 #endif /* TE_PE */
4641
4642 /* This table describes all the machine specific pseudo-ops the assembler
4643 has to support. The fields are:
4644 pseudo-op name without dot
4645 function to call to execute this pseudo-op
4646 Integer arg to pass to the function. */
4647
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  /* ".pool" is a synonym for ".ltorg".  */
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* ".save" and ".vsave" share a handler; the argument distinguishes them.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  /* Table terminator.  */
  { 0, 0, 0 }
};
4721 \f
4722 /* Parser functions used exclusively in instruction operands. */
4723
4724 /* Generic immediate-value read function for use in insn parsing.
4725 STR points to the beginning of the immediate (the leading #);
4726 VAL receives the value; if the value is outside [MIN, MAX]
4727 issue an error. PREFIX_OPT is true if the immediate prefix is
4728 optional. */
4729
4730 static int
4731 parse_immediate (char **str, int *val, int min, int max,
4732 bfd_boolean prefix_opt)
4733 {
4734 expressionS exp;
4735 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4736 if (exp.X_op != O_constant)
4737 {
4738 inst.error = _("constant expression required");
4739 return FAIL;
4740 }
4741
4742 if (exp.X_add_number < min || exp.X_add_number > max)
4743 {
4744 inst.error = _("immediate value out of range");
4745 return FAIL;
4746 }
4747
4748 *val = exp.X_add_number;
4749 return SUCCESS;
4750 }
4751
4752 /* Less-generic immediate-value read function with the possibility of loading a
4753 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4754 instructions. Puts the result directly in inst.operands[i]. */
4755
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied, otherwise
     use a local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the constant go in .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number holds the littlenum count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ... */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4824
4825 /* Returns the pseudo-register number of an FPA immediate constant,
4826 or FAIL if there isn't a valid constant here. */
4827
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not a complete match after all - restore the input pointer.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      /* Pseudo-register numbers for FPA constants start at 8.  */
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4917
4918 /* Returns 1 if a number has "quarter-precision" float format
4919 0baBbbbbbc defgh000 00000000 00000000. */
4920
/* Return non-zero if IMM, viewed as an IEEE single-precision bit
   pattern, can be encoded as an 8-bit "quarter-precision" float:
   0baBbbbbbc defgh000 00000000 00000000, where B must equal NOT(b).  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 30..25 must be either 0 11111 or 1 00000, selected by bit 29.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
4927
4928
4929 /* Detect the presence of a floating point or integer zero constant,
4930 i.e. #0.0 or #0. */
4931
4932 static bfd_boolean
4933 parse_ifimm_zero (char **in)
4934 {
4935 int error_code;
4936
4937 if (!is_immediate_prefix (**in))
4938 return FALSE;
4939
4940 ++*in;
4941
4942 /* Accept #0x0 as a synonym for #0. */
4943 if (strncmp (*in, "0x", 2) == 0)
4944 {
4945 int val;
4946 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4947 return FALSE;
4948 return TRUE;
4949 }
4950
4951 error_code = atof_generic (in, ".", EXP_CHARS,
4952 &generic_floating_point_number);
4953
4954 if (!error_code
4955 && generic_floating_point_number.sign == '+'
4956 && (generic_floating_point_number.low
4957 > generic_floating_point_number.leader))
4958 return TRUE;
4959
4960 return FALSE;
4961 }
4962
4963 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4964 0baBbbbbbc defgh000 00000000 00000000.
4965 The zero and minus-zero cases need special handling, since they can't be
4966 encoded in the "quarter-precision" float format, but can nonetheless be
4967 loaded as integer constants. */
4968
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token looking for a float marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable quarter-precision values, plus +0.0/-0.0 which
	 cannot be encoded but can be loaded as integer constants.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5026
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its kind; entries are looked up via
   the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5048
5049 /* Parse a <shift> specifier on an ARM data processing instruction.
5050 This has three forms:
5051
5052 (LSL|LSR|ASL|ASR|ROR) Rs
5053 (LSL|LSR|ASL|ASR|ROR) #imm
5054 RRX
5055
5056 Note that ASL is assimilated to LSL in the instruction encoding, and
5057 RRX to ROR #0 (which cannot be written as such). */
5058
5059 static int
5060 parse_shift (char **str, int i, enum parse_shift_mode mode)
5061 {
5062 const struct asm_shift_name *shift_name;
5063 enum shift_kind shift;
5064 char *s = *str;
5065 char *p = s;
5066 int reg;
5067
5068 for (p = *str; ISALPHA (*p); p++)
5069 ;
5070
5071 if (p == *str)
5072 {
5073 inst.error = _("shift expression expected");
5074 return FAIL;
5075 }
5076
5077 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5078 p - *str);
5079
5080 if (shift_name == NULL)
5081 {
5082 inst.error = _("shift expression expected");
5083 return FAIL;
5084 }
5085
5086 shift = shift_name->kind;
5087
5088 switch (mode)
5089 {
5090 case NO_SHIFT_RESTRICT:
5091 case SHIFT_IMMEDIATE: break;
5092
5093 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5094 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5095 {
5096 inst.error = _("'LSL' or 'ASR' required");
5097 return FAIL;
5098 }
5099 break;
5100
5101 case SHIFT_LSL_IMMEDIATE:
5102 if (shift != SHIFT_LSL)
5103 {
5104 inst.error = _("'LSL' required");
5105 return FAIL;
5106 }
5107 break;
5108
5109 case SHIFT_ASR_IMMEDIATE:
5110 if (shift != SHIFT_ASR)
5111 {
5112 inst.error = _("'ASR' required");
5113 return FAIL;
5114 }
5115 break;
5116
5117 default: abort ();
5118 }
5119
5120 if (shift != SHIFT_RRX)
5121 {
5122 /* Whitespace can appear here if the next thing is a bare digit. */
5123 skip_whitespace (p);
5124
5125 if (mode == NO_SHIFT_RESTRICT
5126 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5127 {
5128 inst.operands[i].imm = reg;
5129 inst.operands[i].immisreg = 1;
5130 }
5131 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5132 return FAIL;
5133 }
5134 inst.operands[i].shift_kind = shift;
5135 inst.operands[i].shifted = 1;
5136 *str = p;
5137 return SUCCESS;
5138 }
5139
5140 /* Parse a <shifter_operand> for an ARM data processing instruction:
5141
5142 #<immediate>
5143 #<immediate>, <rotate>
5144 <Rm>
5145 <Rm>, <shift>
5146
5147 where <shift> is defined by parse_shift above, and <rotate> is a
5148 multiple of 2 between 0 and 30. Validation of immediate operands
5149 is deferred to md_apply_fix. */
5150
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> or <Rm>, <shift>.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form; validation deferred to md_apply_fix unless an
     explicit rotation follows.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30], and the base
	 constant an 8-bit value, matching the ARM immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5209
5210 /* Group relocation information. Each entry in the table contains the
5211 textual name of the relocation as may appear in assembler source
5212 and must end with a colon.
5213 Along with this textual name are the relocation codes to be used if
5214 the corresponding instruction is an ALU instruction (ADD or SUB only),
5215 an LDR, an LDRS, or an LDC. */
5216
/* One entry per group-relocation name; a zero code means the
   relocation is not allowed for that instruction class.  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5309
5310 /* Given the address of a pointer pointing to the textual name of a group
5311 relocation as may appear in assembler source, attempt to find its details
5312 in group_reloc_table. The pointer will be updated to the character after
5313 the trailing colon. On failure, FAIL will be returned; SUCCESS
5314 otherwise. On success, *entry will be updated to point at the relevant
5315 group_reloc_table entry. */
5316
5317 static int
5318 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5319 {
5320 unsigned int i;
5321 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5322 {
5323 int length = strlen (group_reloc_table[i].name);
5324
5325 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5326 && (*str)[length] == ':')
5327 {
5328 *out = &group_reloc_table[i];
5329 *str += (length + 1);
5330 return SUCCESS;
5331 }
5332 }
5333
5334 return FAIL;
5335 }
5336
5337 /* Parse a <shifter_operand> for an ARM data processing instruction
5338 (as for parse_shifter_operand) where group relocations are allowed:
5339
5340 #<immediate>
5341 #<immediate>, <rotate>
5342 #:<group_reloc>:<expression>
5343 <Rm>
5344 <Rm>, <shift>
5345
5346 where <group_reloc> is one of the strings defined in group_reloc_table.
5347 The hashes are optional.
5348
5349 Everything else is as for parse_shifter_operand. */
5350
5351 static parse_operand_result
5352 parse_shifter_operand_group_reloc (char **str, int i)
5353 {
5354 /* Determine if we have the sequence of characters #: or just :
5355 coming next. If we do, then we check for a group relocation.
5356 If we don't, punt the whole lot to parse_shifter_operand. */
5357
5358 if (((*str)[0] == '#' && (*str)[1] == ':')
5359 || (*str)[0] == ':')
5360 {
5361 struct group_reloc_table_entry *entry;
5362
5363 if ((*str)[0] == '#')
5364 (*str) += 2;
5365 else
5366 (*str)++;
5367
5368 /* Try to parse a group relocation. Anything else is an error. */
5369 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5370 {
5371 inst.error = _("unknown group relocation");
5372 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5373 }
5374
5375 /* We now have the group relocation table entry corresponding to
5376 the name in the assembler source. Next, we parse the expression. */
5377 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5378 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5379
5380 /* Record the relocation type (always the ALU variant here). */
5381 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5382 gas_assert (inst.reloc.type != 0);
5383
5384 return PARSE_OPERAND_SUCCESS;
5385 }
5386 else
5387 return parse_shifter_operand (str, i) == SUCCESS
5388 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5389
5390 /* Never reached. */
5391 }
5392
5393 /* Parse a Neon alignment expression. Information is written to
5394 inst.operands[i]. We assume the initial ':' has been skipped.
5395
5396 align .imm = align << 8, .immisalign=1, .preind=0 */
5397 static parse_operand_result
5398 parse_neon_alignment (char **str, int i)
5399 {
5400 char *p = *str;
5401 expressionS exp;
5402
5403 my_get_expression (&exp, &p, GE_NO_PREFIX);
5404
5405 if (exp.X_op != O_constant)
5406 {
5407 inst.error = _("alignment must be constant");
5408 return PARSE_OPERAND_FAIL;
5409 }
5410
5411 inst.operands[i].imm = exp.X_add_number << 8;
5412 inst.operands[i].immisalign = 1;
5413 /* Alignments are not pre-indexes. */
5414 inst.operands[i].preind = 0;
5415
5416 *str = p;
5417 return PARSE_OPERAND_SUCCESS;
5418 }
5419
5420 /* Parse all forms of an ARM address expression. Information is written
5421 to inst.operands[i] and/or inst.reloc.
5422
5423 Preindexed addressing (.preind=1):
5424
5425 [Rn, #offset] .reg=Rn .reloc.exp=offset
5426 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5427 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 .shift_kind=shift .reloc.exp=shift_imm
5429
5430 These three may have a trailing ! which causes .writeback to be set also.
5431
5432 Postindexed addressing (.postind=1, .writeback=1):
5433
5434 [Rn], #offset .reg=Rn .reloc.exp=offset
5435 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5436 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 .shift_kind=shift .reloc.exp=shift_imm
5438
5439 Unindexed addressing (.preind=0, .postind=0):
5440
5441 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5442
5443 Other:
5444
5445 [Rn]{!} shorthand for [Rn,#0]{!}
5446 =immediate .isreg=0 .reloc.exp=immediate
5447 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5448
5449 It is the caller's responsibility to check for addressing modes not
5450 supported by the instruction, and to set inst.reloc.type. */
5451
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Every bracketed form starts with a base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* [Rn, ...] - a pre-indexed form.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      /* [Rn, +/-Rm {, shift}].  */
      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: back up over a consumed '-' so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this group relocation is
		 not defined for this class of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* [Rn]! - pre-indexed with writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* [Rn], ... - post-indexed or unindexed forms.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  /* [Rn], +/-Rm {, shift}.  */
	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* Back up over a consumed '-' so the expression parser
		 sees the sign itself.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5703
5704 static int
5705 parse_address (char **str, int i)
5706 {
5707 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5708 ? SUCCESS : FAIL;
5709 }
5710
/* As parse_address, but with group relocations of TYPE permitted and
   the full parse_operand_result returned to the caller.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5716
5717 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* NOTE(review): assumes inst.reloc.type is BFD_RELOC_UNUSED on entry;
     the prefix checks below rely on that to detect which one matched.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* Skip the 9-character ":lower16:"/":upper16:" prefix if seen.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a relocation prefix the operand must be a constant that
     fits in 16 bits; with one, resolution is left to the fixup.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5756
/* Miscellaneous. */

/* Parse a PSR flag operand. The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the PSR operand is the destination of a write (MSR);
   certain mask bits (PSR_f) are only implied in that case.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores use their own special-register names, looked up in
     arm_v7m_psr_hsh below.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants. */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase. This is just a convenience
     feature for ease of use and backwards compatibility. */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs. */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *apsr/*psr family the register name proper ends at the
	 'r'; any remaining characters form a suffix handled later.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified. Note that
	 APSR itself is handled above. */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already). Do that by setting the PSR_f field
	 here. */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four characters of "SPSR"/"CPSR"/"APSR".  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows. */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields. */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Bit 0x20 acts as a "duplicate seen" marker: repeating any
	     of n/z/c/v/q sets it and the mask is rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* 0x1f == all five of n, z, c, v and q were given: that is the
	     flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and incomplete nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error; /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
	 is deprecated, but allow it anyway. */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here). */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5955
5956 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5957 value suitable for splatting into the AIF field of the instruction. */
5958
5959 static int
5960 parse_cps_flags (char **str)
5961 {
5962 int val = 0;
5963 int saw_a_flag = 0;
5964 char *s = *str;
5965
5966 for (;;)
5967 switch (*s++)
5968 {
5969 case '\0': case ',':
5970 goto done;
5971
5972 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5973 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5974 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5975
5976 default:
5977 inst.error = _("unrecognized CPS flag");
5978 return FAIL;
5979 }
5980
5981 done:
5982 if (saw_a_flag == 0)
5983 {
5984 inst.error = _("missing CPS flags");
5985 return FAIL;
5986 }
5987
5988 *str = s - 1;
5989 return val;
5990 }
5991
5992 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5993 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5994
5995 static int
5996 parse_endian_specifier (char **str)
5997 {
5998 int little_endian;
5999 char *s = *str;
6000
6001 if (strncasecmp (s, "BE", 2))
6002 little_endian = 0;
6003 else if (strncasecmp (s, "LE", 2))
6004 little_endian = 1;
6005 else
6006 {
6007 inst.error = _("valid endian specifiers are be or le");
6008 return FAIL;
6009 }
6010
6011 if (ISALNUM (s[2]) || s[2] == '_')
6012 {
6013 inst.error = _("valid endian specifiers are be or le");
6014 return FAIL;
6015 }
6016
6017 *str = s + 2;
6018 return little_endian;
6019 }
6020
6021 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6022 value suitable for poking into the rotate field of an sxt or sxta
6023 instruction, or FAIL on error. */
6024
6025 static int
6026 parse_ror (char **str)
6027 {
6028 int rot;
6029 char *s = *str;
6030
6031 if (strncasecmp (s, "ROR", 3) == 0)
6032 s += 3;
6033 else
6034 {
6035 inst.error = _("missing rotation field after comma");
6036 return FAIL;
6037 }
6038
6039 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6040 return FAIL;
6041
6042 switch (rot)
6043 {
6044 case 0: *str = s; return 0x0;
6045 case 8: *str = s; return 0x1;
6046 case 16: *str = s; return 0x2;
6047 case 24: *str = s; return 0x3;
6048
6049 default:
6050 inst.error = _("rotation can only be 0, 8, 16, or 24");
6051 return FAIL;
6052 }
6053 }
6054
6055 /* Parse a conditional code (from conds[] below). The value returned is in the
6056 range 0 .. 14, or FAIL. */
6057 static int
6058 parse_cond (char **str)
6059 {
6060 char *q;
6061 const struct asm_cond *c;
6062 int n;
6063 /* Condition codes are always 2 characters, so matching up to
6064 3 characters is sufficient. */
6065 char cond[3];
6066
6067 q = *str;
6068 n = 0;
6069 while (ISALPHA (*q) && n < 3)
6070 {
6071 cond[n] = TOLOWER (*q);
6072 q++;
6073 n++;
6074 }
6075
6076 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6077 if (!c)
6078 {
6079 inst.error = _("condition required");
6080 return FAIL;
6081 }
6082
6083 *str = q;
6084 return c->value;
6085 }
6086
/* If the given feature is available in the selected CPU, mark it as used.
   Returns TRUE iff the feature is available.  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{
  /* Ensure the option is valid on the current architecture. */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     The feature is merged into the "used" set for whichever instruction
     set (ARM or Thumb) is currently being assembled.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);

  return TRUE;
}
6105
6106 /* Parse an option for a barrier instruction. Returns the encoding for the
6107 option, or FAIL. */
6108 static int
6109 parse_barrier (char **str)
6110 {
6111 char *p, *q;
6112 const struct asm_barrier_opt *o;
6113
6114 p = q = *str;
6115 while (ISALPHA (*q))
6116 q++;
6117
6118 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6119 q - p);
6120 if (!o)
6121 return FAIL;
6122
6123 if (!mark_feature_used (&o->arch))
6124 return FAIL;
6125
6126 *str = q;
6127 return o->value;
6128 }
6129
6130 /* Parse the operands of a table branch instruction. Similar to a memory
6131 operand. */
6132 static int
6133 parse_tb (char **str)
6134 {
6135 char * p = *str;
6136 int reg;
6137
6138 if (skip_past_char (&p, '[') == FAIL)
6139 {
6140 inst.error = _("'[' expected");
6141 return FAIL;
6142 }
6143
6144 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6145 {
6146 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6147 return FAIL;
6148 }
6149 inst.operands[0].reg = reg;
6150
6151 if (skip_past_comma (&p) == FAIL)
6152 {
6153 inst.error = _("',' expected");
6154 return FAIL;
6155 }
6156
6157 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6158 {
6159 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6160 return FAIL;
6161 }
6162 inst.operands[0].imm = reg;
6163
6164 if (skip_past_comma (&p) == SUCCESS)
6165 {
6166 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6167 return FAIL;
6168 if (inst.reloc.exp.X_add_number != 1)
6169 {
6170 inst.error = _("invalid shift");
6171 return FAIL;
6172 }
6173 inst.operands[0].shifted = 1;
6174 }
6175
6176 if (skip_past_char (&p, ']') == FAIL)
6177 {
6178 inst.error = _("']' expected");
6179 return FAIL;
6180 }
6181 *str = p;
6182 return SUCCESS;
6183 }
6184
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[] and is advanced (note the i++ post-increments
     below) as operands are consumed; the final value is written back to
     *which_operand on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; what follows decides which case applies.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register: a second core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args. */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6407
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the low
   16 bits and the Thumb code the high 16 bits; parse_operands splits
   them apart again according to the instruction set in use.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6412
/* Matcher codes for parse_operands.

   Codes named OP_o* denote optional operands; they must all be placed
   at or after OP_FIRST_OPTIONAL, since parse_operands compares against
   that value to decide when backtracking is allowed.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Everything from here on is optional; parse_operands records a
     backtrack point before attempting any code >= this value.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6543
6544 /* Generic instruction operand parser. This does no encoding and no
6545 semantic validation; it merely squirrels values away in the inst
6546 structure. Returns SUCCESS or FAIL depending on whether the
6547 specified grammar matched. */
6548 static int
6549 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6550 {
6551 unsigned const int *upat = pattern;
6552 char *backtrack_pos = 0;
6553 const char *backtrack_error = 0;
6554 int i, val = 0, backtrack_index = 0;
6555 enum arm_reg_type rtype;
6556 parse_operand_result result;
6557 unsigned int op_parse_code;
6558
6559 #define po_char_or_fail(chr) \
6560 do \
6561 { \
6562 if (skip_past_char (&str, chr) == FAIL) \
6563 goto bad_args; \
6564 } \
6565 while (0)
6566
6567 #define po_reg_or_fail(regtype) \
6568 do \
6569 { \
6570 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6571 & inst.operands[i].vectype); \
6572 if (val == FAIL) \
6573 { \
6574 first_error (_(reg_expected_msgs[regtype])); \
6575 goto failure; \
6576 } \
6577 inst.operands[i].reg = val; \
6578 inst.operands[i].isreg = 1; \
6579 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6580 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6581 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6582 || rtype == REG_TYPE_VFD \
6583 || rtype == REG_TYPE_NQ); \
6584 } \
6585 while (0)
6586
6587 #define po_reg_or_goto(regtype, label) \
6588 do \
6589 { \
6590 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6591 & inst.operands[i].vectype); \
6592 if (val == FAIL) \
6593 goto label; \
6594 \
6595 inst.operands[i].reg = val; \
6596 inst.operands[i].isreg = 1; \
6597 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6598 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6599 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6600 || rtype == REG_TYPE_VFD \
6601 || rtype == REG_TYPE_NQ); \
6602 } \
6603 while (0)
6604
6605 #define po_imm_or_fail(min, max, popt) \
6606 do \
6607 { \
6608 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6609 goto failure; \
6610 inst.operands[i].imm = val; \
6611 } \
6612 while (0)
6613
6614 #define po_scalar_or_goto(elsz, label) \
6615 do \
6616 { \
6617 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6618 if (val == FAIL) \
6619 goto label; \
6620 inst.operands[i].reg = val; \
6621 inst.operands[i].isscalar = 1; \
6622 } \
6623 while (0)
6624
6625 #define po_misc_or_fail(expr) \
6626 do \
6627 { \
6628 if (expr) \
6629 goto failure; \
6630 } \
6631 while (0)
6632
6633 #define po_misc_or_fail_no_backtrack(expr) \
6634 do \
6635 { \
6636 result = expr; \
6637 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6638 backtrack_pos = 0; \
6639 if (result != PARSE_OPERAND_SUCCESS) \
6640 goto failure; \
6641 } \
6642 while (0)
6643
6644 #define po_barrier_or_imm(str) \
6645 do \
6646 { \
6647 val = parse_barrier (&str); \
6648 if (val == FAIL && ! ISALPHA (*str)) \
6649 goto immediate; \
6650 if (val == FAIL \
6651 /* ISB can only take SY as an option. */ \
6652 || ((inst.instruction & 0xf0) == 0x60 \
6653 && val != 0xf)) \
6654 { \
6655 inst.error = _("invalid barrier type"); \
6656 backtrack_pos = 0; \
6657 goto failure; \
6658 } \
6659 } \
6660 while (0)
6661
6662 skip_whitespace (str);
6663
6664 for (i = 0; upat[i] != OP_stop; i++)
6665 {
6666 op_parse_code = upat[i];
6667 if (op_parse_code >= 1<<16)
6668 op_parse_code = thumb ? (op_parse_code >> 16)
6669 : (op_parse_code & ((1<<16)-1));
6670
6671 if (op_parse_code >= OP_FIRST_OPTIONAL)
6672 {
6673 /* Remember where we are in case we need to backtrack. */
6674 gas_assert (!backtrack_pos);
6675 backtrack_pos = str;
6676 backtrack_error = inst.error;
6677 backtrack_index = i;
6678 }
6679
6680 if (i > 0 && (i > 1 || inst.operands[0].present))
6681 po_char_or_fail (',');
6682
6683 switch (op_parse_code)
6684 {
6685 /* Registers */
6686 case OP_oRRnpc:
6687 case OP_oRRnpcsp:
6688 case OP_RRnpc:
6689 case OP_RRnpcsp:
6690 case OP_oRR:
6691 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6692 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6693 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6694 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6695 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6696 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6697 case OP_oRND:
6698 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6699 case OP_RVC:
6700 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6701 break;
6702 /* Also accept generic coprocessor regs for unknown registers. */
6703 coproc_reg:
6704 po_reg_or_fail (REG_TYPE_CN);
6705 break;
6706 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6707 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6708 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6709 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6710 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6711 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6712 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6713 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6714 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6715 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6716 case OP_oRNQ:
6717 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6718 case OP_oRNDQ:
6719 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6720 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6721 case OP_oRNSDQ:
6722 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6723
6724 /* Neon scalar. Using an element size of 8 means that some invalid
6725 scalars are accepted here, so deal with those in later code. */
6726 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6727
6728 case OP_RNDQ_I0:
6729 {
6730 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6731 break;
6732 try_imm0:
6733 po_imm_or_fail (0, 0, TRUE);
6734 }
6735 break;
6736
6737 case OP_RVSD_I0:
6738 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6739 break;
6740
6741 case OP_RSVD_FI0:
6742 {
6743 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6744 break;
6745 try_ifimm0:
6746 if (parse_ifimm_zero (&str))
6747 inst.operands[i].imm = 0;
6748 else
6749 {
6750 inst.error
6751 = _("only floating point zero is allowed as immediate value");
6752 goto failure;
6753 }
6754 }
6755 break;
6756
6757 case OP_RR_RNSC:
6758 {
6759 po_scalar_or_goto (8, try_rr);
6760 break;
6761 try_rr:
6762 po_reg_or_fail (REG_TYPE_RN);
6763 }
6764 break;
6765
6766 case OP_RNSDQ_RNSC:
6767 {
6768 po_scalar_or_goto (8, try_nsdq);
6769 break;
6770 try_nsdq:
6771 po_reg_or_fail (REG_TYPE_NSDQ);
6772 }
6773 break;
6774
6775 case OP_RNDQ_RNSC:
6776 {
6777 po_scalar_or_goto (8, try_ndq);
6778 break;
6779 try_ndq:
6780 po_reg_or_fail (REG_TYPE_NDQ);
6781 }
6782 break;
6783
6784 case OP_RND_RNSC:
6785 {
6786 po_scalar_or_goto (8, try_vfd);
6787 break;
6788 try_vfd:
6789 po_reg_or_fail (REG_TYPE_VFD);
6790 }
6791 break;
6792
6793 case OP_VMOV:
6794 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6795 not careful then bad things might happen. */
6796 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6797 break;
6798
6799 case OP_RNDQ_Ibig:
6800 {
6801 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6802 break;
6803 try_immbig:
6804 /* There's a possibility of getting a 64-bit immediate here, so
6805 we need special handling. */
6806 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6807 == FAIL)
6808 {
6809 inst.error = _("immediate value is out of range");
6810 goto failure;
6811 }
6812 }
6813 break;
6814
6815 case OP_RNDQ_I63b:
6816 {
6817 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6818 break;
6819 try_shimm:
6820 po_imm_or_fail (0, 63, TRUE);
6821 }
6822 break;
6823
6824 case OP_RRnpcb:
6825 po_char_or_fail ('[');
6826 po_reg_or_fail (REG_TYPE_RN);
6827 po_char_or_fail (']');
6828 break;
6829
6830 case OP_RRnpctw:
6831 case OP_RRw:
6832 case OP_oRRw:
6833 po_reg_or_fail (REG_TYPE_RN);
6834 if (skip_past_char (&str, '!') == SUCCESS)
6835 inst.operands[i].writeback = 1;
6836 break;
6837
6838 /* Immediates */
6839 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6840 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6841 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6842 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6843 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6844 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6845 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6846 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6847 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6848 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6849 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6850 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6851
6852 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6853 case OP_oI7b:
6854 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6855 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6856 case OP_oI31b:
6857 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6858 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6859 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6860 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6861
6862 /* Immediate variants */
6863 case OP_oI255c:
6864 po_char_or_fail ('{');
6865 po_imm_or_fail (0, 255, TRUE);
6866 po_char_or_fail ('}');
6867 break;
6868
6869 case OP_I31w:
6870 /* The expression parser chokes on a trailing !, so we have
6871 to find it first and zap it. */
6872 {
6873 char *s = str;
6874 while (*s && *s != ',')
6875 s++;
6876 if (s[-1] == '!')
6877 {
6878 s[-1] = '\0';
6879 inst.operands[i].writeback = 1;
6880 }
6881 po_imm_or_fail (0, 31, TRUE);
6882 if (str == s - 1)
6883 str = s;
6884 }
6885 break;
6886
6887 /* Expressions */
6888 case OP_EXPi: EXPi:
6889 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6890 GE_OPT_PREFIX));
6891 break;
6892
6893 case OP_EXP:
6894 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6895 GE_NO_PREFIX));
6896 break;
6897
6898 case OP_EXPr: EXPr:
6899 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6900 GE_NO_PREFIX));
6901 if (inst.reloc.exp.X_op == O_symbol)
6902 {
6903 val = parse_reloc (&str);
6904 if (val == -1)
6905 {
6906 inst.error = _("unrecognized relocation suffix");
6907 goto failure;
6908 }
6909 else if (val != BFD_RELOC_UNUSED)
6910 {
6911 inst.operands[i].imm = val;
6912 inst.operands[i].hasreloc = 1;
6913 }
6914 }
6915 break;
6916
6917 /* Operand for MOVW or MOVT. */
6918 case OP_HALF:
6919 po_misc_or_fail (parse_half (&str));
6920 break;
6921
6922 /* Register or expression. */
6923 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6924 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6925
6926 /* Register or immediate. */
6927 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6928 I0: po_imm_or_fail (0, 0, FALSE); break;
6929
6930 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6931 IF:
6932 if (!is_immediate_prefix (*str))
6933 goto bad_args;
6934 str++;
6935 val = parse_fpa_immediate (&str);
6936 if (val == FAIL)
6937 goto failure;
6938 /* FPA immediates are encoded as registers 8-15.
6939 parse_fpa_immediate has already applied the offset. */
6940 inst.operands[i].reg = val;
6941 inst.operands[i].isreg = 1;
6942 break;
6943
6944 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6945 I32z: po_imm_or_fail (0, 32, FALSE); break;
6946
6947 /* Two kinds of register. */
6948 case OP_RIWR_RIWC:
6949 {
6950 struct reg_entry *rege = arm_reg_parse_multi (&str);
6951 if (!rege
6952 || (rege->type != REG_TYPE_MMXWR
6953 && rege->type != REG_TYPE_MMXWC
6954 && rege->type != REG_TYPE_MMXWCG))
6955 {
6956 inst.error = _("iWMMXt data or control register expected");
6957 goto failure;
6958 }
6959 inst.operands[i].reg = rege->number;
6960 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6961 }
6962 break;
6963
6964 case OP_RIWC_RIWG:
6965 {
6966 struct reg_entry *rege = arm_reg_parse_multi (&str);
6967 if (!rege
6968 || (rege->type != REG_TYPE_MMXWC
6969 && rege->type != REG_TYPE_MMXWCG))
6970 {
6971 inst.error = _("iWMMXt control register expected");
6972 goto failure;
6973 }
6974 inst.operands[i].reg = rege->number;
6975 inst.operands[i].isreg = 1;
6976 }
6977 break;
6978
6979 /* Misc */
6980 case OP_CPSF: val = parse_cps_flags (&str); break;
6981 case OP_ENDI: val = parse_endian_specifier (&str); break;
6982 case OP_oROR: val = parse_ror (&str); break;
6983 case OP_COND: val = parse_cond (&str); break;
6984 case OP_oBARRIER_I15:
6985 po_barrier_or_imm (str); break;
6986 immediate:
6987 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6988 goto failure;
6989 break;
6990
6991 case OP_wPSR:
6992 case OP_rPSR:
6993 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6994 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6995 {
6996 inst.error = _("Banked registers are not available with this "
6997 "architecture.");
6998 goto failure;
6999 }
7000 break;
7001 try_psr:
7002 val = parse_psr (&str, op_parse_code == OP_wPSR);
7003 break;
7004
7005 case OP_APSR_RR:
7006 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7007 break;
7008 try_apsr:
7009 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7010 instruction). */
7011 if (strncasecmp (str, "APSR_", 5) == 0)
7012 {
7013 unsigned found = 0;
7014 str += 5;
7015 while (found < 15)
7016 switch (*str++)
7017 {
7018 case 'c': found = (found & 1) ? 16 : found | 1; break;
7019 case 'n': found = (found & 2) ? 16 : found | 2; break;
7020 case 'z': found = (found & 4) ? 16 : found | 4; break;
7021 case 'v': found = (found & 8) ? 16 : found | 8; break;
7022 default: found = 16;
7023 }
7024 if (found != 15)
7025 goto failure;
7026 inst.operands[i].isvec = 1;
7027 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7028 inst.operands[i].reg = REG_PC;
7029 }
7030 else
7031 goto failure;
7032 break;
7033
7034 case OP_TB:
7035 po_misc_or_fail (parse_tb (&str));
7036 break;
7037
7038 /* Register lists. */
7039 case OP_REGLST:
7040 val = parse_reg_list (&str);
7041 if (*str == '^')
7042 {
7043 inst.operands[i].writeback = 1;
7044 str++;
7045 }
7046 break;
7047
7048 case OP_VRSLST:
7049 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7050 break;
7051
7052 case OP_VRDLST:
7053 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7054 break;
7055
7056 case OP_VRSDLST:
7057 /* Allow Q registers too. */
7058 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7059 REGLIST_NEON_D);
7060 if (val == FAIL)
7061 {
7062 inst.error = NULL;
7063 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7064 REGLIST_VFP_S);
7065 inst.operands[i].issingle = 1;
7066 }
7067 break;
7068
7069 case OP_NRDLST:
7070 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7071 REGLIST_NEON_D);
7072 break;
7073
7074 case OP_NSTRLST:
7075 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7076 &inst.operands[i].vectype);
7077 break;
7078
7079 /* Addressing modes */
7080 case OP_ADDR:
7081 po_misc_or_fail (parse_address (&str, i));
7082 break;
7083
7084 case OP_ADDRGLDR:
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str, i, GROUP_LDR));
7087 break;
7088
7089 case OP_ADDRGLDRS:
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str, i, GROUP_LDRS));
7092 break;
7093
7094 case OP_ADDRGLDC:
7095 po_misc_or_fail_no_backtrack (
7096 parse_address_group_reloc (&str, i, GROUP_LDC));
7097 break;
7098
7099 case OP_SH:
7100 po_misc_or_fail (parse_shifter_operand (&str, i));
7101 break;
7102
7103 case OP_SHG:
7104 po_misc_or_fail_no_backtrack (
7105 parse_shifter_operand_group_reloc (&str, i));
7106 break;
7107
7108 case OP_oSHll:
7109 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7110 break;
7111
7112 case OP_oSHar:
7113 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7114 break;
7115
7116 case OP_oSHllar:
7117 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7118 break;
7119
7120 default:
7121 as_fatal (_("unhandled operand code %d"), op_parse_code);
7122 }
7123
7124 /* Various value-based sanity checks and shared operations. We
7125 do not signal immediate failures for the register constraints;
7126 this allows a syntax error to take precedence. */
7127 switch (op_parse_code)
7128 {
7129 case OP_oRRnpc:
7130 case OP_RRnpc:
7131 case OP_RRnpcb:
7132 case OP_RRw:
7133 case OP_oRRw:
7134 case OP_RRnpc_I0:
7135 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7136 inst.error = BAD_PC;
7137 break;
7138
7139 case OP_oRRnpcsp:
7140 case OP_RRnpcsp:
7141 if (inst.operands[i].isreg)
7142 {
7143 if (inst.operands[i].reg == REG_PC)
7144 inst.error = BAD_PC;
7145 else if (inst.operands[i].reg == REG_SP)
7146 inst.error = BAD_SP;
7147 }
7148 break;
7149
7150 case OP_RRnpctw:
7151 if (inst.operands[i].isreg
7152 && inst.operands[i].reg == REG_PC
7153 && (inst.operands[i].writeback || thumb))
7154 inst.error = BAD_PC;
7155 break;
7156
7157 case OP_CPSF:
7158 case OP_ENDI:
7159 case OP_oROR:
7160 case OP_wPSR:
7161 case OP_rPSR:
7162 case OP_COND:
7163 case OP_oBARRIER_I15:
7164 case OP_REGLST:
7165 case OP_VRSLST:
7166 case OP_VRDLST:
7167 case OP_VRSDLST:
7168 case OP_NRDLST:
7169 case OP_NSTRLST:
7170 if (val == FAIL)
7171 goto failure;
7172 inst.operands[i].imm = val;
7173 break;
7174
7175 default:
7176 break;
7177 }
7178
7179 /* If we get here, this operand was successfully parsed. */
7180 inst.operands[i].present = 1;
7181 continue;
7182
7183 bad_args:
7184 inst.error = BAD_ARGS;
7185
7186 failure:
7187 if (!backtrack_pos)
7188 {
7189 /* The parse routine should already have set inst.error, but set a
7190 default here just in case. */
7191 if (!inst.error)
7192 inst.error = _("syntax error");
7193 return FAIL;
7194 }
7195
7196 /* Do not backtrack over a trailing optional argument that
7197 absorbed some text. We will only fail again, with the
7198 'garbage following instruction' error message, which is
7199 probably less helpful than the current one. */
7200 if (backtrack_index == i && backtrack_pos != str
7201 && upat[i+1] == OP_stop)
7202 {
7203 if (!inst.error)
7204 inst.error = _("syntax error");
7205 return FAIL;
7206 }
7207
7208 /* Try again, skipping the optional argument at backtrack_pos. */
7209 str = backtrack_pos;
7210 inst.error = backtrack_error;
7211 inst.operands[backtrack_index].present = 0;
7212 i = backtrack_index;
7213 backtrack_pos = 0;
7214 }
7215
7216 /* Check that we have parsed all the arguments. */
7217 if (*str != '\0' && !inst.error)
7218 inst.error = _("garbage following instruction");
7219
7220 return inst.error ? FAIL : SUCCESS;
7221 }
7222
7223 #undef po_char_or_fail
7224 #undef po_reg_or_fail
7225 #undef po_reg_or_goto
7226 #undef po_imm_or_fail
7227 #undef po_scalar_or_fail
7228 #undef po_barrier_or_imm
7229
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and bail out of the
   calling function.  NOTE: this expands to a bare 'return', so it can
   only be used inside a function returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7241
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   NOTE: expands to a bare 'return', so only usable inside a function
   returning void.  REG is parenthesized (so compound arguments bind
   correctly) but may be evaluated twice; it must have no side
   effects.  */
#define reject_bad_reg(reg)					\
  do								\
    if ((reg) == REG_SP || (reg) == REG_PC)			\
      {								\
	inst.error = ((reg) == REG_SP) ? BAD_SP : BAD_PC;	\
	return;							\
      }								\
  while (0)
7253
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  REG is parenthesized so compound arguments bind
   correctly; it may be evaluated more than once and must have no side
   effects.  */
#define warn_deprecated_sp(reg)				\
  do							\
    if (warn_on_deprecated && (reg) == REG_SP)		\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7261
7262 /* Functions for operand encoding. ARM, then Thumb. */
7263
/* Rotate the 32-bit value V left by N bits.  Both shift counts are
   masked so N == 0 (and N == 32) avoid an undefined 32-bit shift.
   Arguments are fully parenthesized so compound expressions such as
   rotate_left (a + b, n) bind correctly.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7265
7266 /* If VAL can be encoded in the immediate field of an ARM instruction,
7267 return the encoded form. Otherwise, return FAIL. */
7268
7269 static unsigned int
7270 encode_arm_immediate (unsigned int val)
7271 {
7272 unsigned int a, i;
7273
7274 if (val <= 0xff)
7275 return val;
7276
7277 for (i = 2; i < 32; i += 2)
7278 if ((a = rotate_left (val, i)) <= 0xff)
7279 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7280
7281 return FAIL;
7282 }
7283
7284 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7285 return the encoded form. Otherwise, return FAIL. */
7286 static unsigned int
7287 encode_thumb32_immediate (unsigned int val)
7288 {
7289 unsigned int a, i;
7290
7291 if (val <= 0xff)
7292 return val;
7293
7294 for (i = 1; i <= 24; i++)
7295 {
7296 a = val >> i;
7297 if ((val & ~(0xff << i)) == 0)
7298 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7299 }
7300
7301 a = val & 0xff;
7302 if (val == ((a << 16) | a))
7303 return 0x100 | a;
7304 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7305 return 0x300 | a;
7306
7307 a = val & 0xff00;
7308 if (val == ((a << 16) | a))
7309 return 0x200 | (a >> 8);
7310
7311 return FAIL;
7312 }
7313 /* Encode a VFP SP or DP register number into inst.instruction. */
7314
7315 static void
7316 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7317 {
7318 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7319 && reg > 15)
7320 {
7321 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7322 {
7323 if (thumb_mode)
7324 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7325 fpu_vfp_ext_d32);
7326 else
7327 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7328 fpu_vfp_ext_d32);
7329 }
7330 else
7331 {
7332 first_error (_("D register out of range for selected VFP version"));
7333 return;
7334 }
7335 }
7336
7337 switch (pos)
7338 {
7339 case VFP_REG_Sd:
7340 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7341 break;
7342
7343 case VFP_REG_Sn:
7344 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7345 break;
7346
7347 case VFP_REG_Sm:
7348 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7349 break;
7350
7351 case VFP_REG_Dd:
7352 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7353 break;
7354
7355 case VFP_REG_Dn:
7356 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7357 break;
7358
7359 case VFP_REG_Dm:
7360 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7361 break;
7362
7363 default:
7364 abort ();
7365 }
7366 }
7367
7368 /* Encode a <shift> in an ARM-format instruction. The immediate,
7369 if any, is handled by md_apply_fix. */
7370 static void
7371 encode_arm_shift (int i)
7372 {
7373 if (inst.operands[i].shift_kind == SHIFT_RRX)
7374 inst.instruction |= SHIFT_ROR << 5;
7375 else
7376 {
7377 inst.instruction |= inst.operands[i].shift_kind << 5;
7378 if (inst.operands[i].immisreg)
7379 {
7380 inst.instruction |= SHIFT_BY_REG;
7381 inst.instruction |= inst.operands[i].imm << 8;
7382 }
7383 else
7384 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7385 }
7386 }
7387
7388 static void
7389 encode_arm_shifter_operand (int i)
7390 {
7391 if (inst.operands[i].isreg)
7392 {
7393 inst.instruction |= inst.operands[i].reg;
7394 encode_arm_shift (i);
7395 }
7396 else
7397 {
7398 inst.instruction |= INST_IMMEDIATE;
7399 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7400 inst.instruction |= inst.operands[i].imm;
7401 }
7402 }
7403
7404 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* Encode the base register and the pre/post-index and write-back
     bits shared by ARM addressing modes 2 and 3.  IS_T selects the
     user-mode (ldrt/strt style) variants, which only accept
     post-indexed addressing.  */

  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For T variants the W bit marks the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits 19:16) will be written back and
     equals the transfer register (bits 15:12).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7446
7447 /* inst.operands[i] was set up by parse_address. Encode it into an
7448 ARM-format mode 2 load or store instruction. If is_t is true,
7449 reject forms that cannot be used with a T instruction (i.e. not
7450 post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  /* True when the base register is the PC (literal / PC-relative
     forms).  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally shifted.  PC may be neither the
	 offset register nor a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is ROR with an implicit zero amount; otherwise the
	     shift amount is filled in by md_apply_fix.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7506
7507 /* inst.operands[i] was set up by parse_address. Encode it into an
7508 ARM-format mode 3 load or store instruction. Reject forms that
7509 cannot be used with such instructions. If is_t is true, reject
7510 forms that cannot be used with a T instruction (i.e. not
7511 post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset: PC is not allowed as the offset register,
	 nor as the base for T variants or with write-back.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7550
7551 /* Write immediate bits [7:0] to the following locations:
7552
7553 |28/24|23 19|18 16|15 4|3 0|
7554 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7555
7556 This function is used by VMOV/VMVN/VORR/VBIC. */
7557
7558 static void
7559 neon_write_immbits (unsigned immbits)
7560 {
7561 inst.instruction |= immbits & 0xf;
7562 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7563 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7564 }
7565
7566 /* Invert low-order SIZE bits of XHI:XLO. */
7567
/* Invert the low-order SIZE bits of XHI:XLO in place; either pointer
   may be NULL, in which case that half is skipped.  */
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned mask;

  switch (size)
    {
    case 8:
      mask = 0xff;
      break;

    case 16:
      mask = 0xffff;
      break;

    case 32:
    case 64:
      mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = ~*xlo & mask;

  /* Only a 64-bit invert touches the high word.  */
  if (xhi && size == 64)
    *xhi = ~*xhi & 0xffffffff;
}
7602
7603 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7604 A, B, C, D. */
7605
static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned shift;

  /* Each of the four bytes must be either all-zeros or all-ones.  */
  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7614
7615 /* For immediate of above form, return 0bABCD. */
7616
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Gather bit 0 of each byte: bit N of the result comes from byte N
     of IMM.  */
  unsigned result = 0;
  unsigned n;

  for (n = 0; n < 4; n++)
    result |= ((imm >> (8 * n)) & 1) << n;

  return result;
}
7623
7624 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7625
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Keep bits [25:19] of the single-precision value as the low seven
     result bits, and move the sign bit (bit 31) down to bit 7.  */
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return low7 | sign;
}
7631
7632 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7633 the instruction. *OP is passed as the initial value of the op field, and
7634 may be set to a different value depending on the constant (i.e.
7635 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7636 MVN). If the immediate looks like a repeated pattern then also
7637 try smaller element sizes. */
7638
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 1111, 32-bit elements,
     VMOV only (OP must not already be 1).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern in which every byte is all-zeros or all-ones:
	 cmode 1110 with OP forced to 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is encodable only when both
	 halves match; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* An 8-bit value in one of the four byte positions (cmodes
	 0000/0010/0100/0110), or an 8-bit value followed by ones
	 (cmodes 1100/1101).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry with 16-bit elements if the 32-bit value is a repeated
	 16-bit pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* An 8-bit value in either byte of each halfword (cmodes
	 1000/1010).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry with 8-bit elements if the 16-bit value is a repeated
	 byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7741
7742 #if defined BFD_HOST_64_BIT
7743 /* Returns TRUE if double precision value V may be cast
7744 to single precision without loss of accuracy. */
7745
7746 static bfd_boolean
7747 is_double_a_single (bfd_int64_t v)
7748 {
7749 int exp = (int)((v >> 52) & 0x7FF);
7750 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7751
7752 return (exp == 0 || exp == 0x7FF
7753 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7754 && (mantissa & 0x1FFFFFFFl) == 0;
7755 }
7756
7757 /* Returns a double precision value casted to single precision
7758 (ignoring the least significant bits in exponent and mantissa). */
7759
static int
double_to_single (bfd_int64_t v)
{
  /* Split the IEEE double into sign, biased exponent and fraction.  */
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity / NaN: map to the all-ones single exponent.  */
    exp = 0xFF;
  else
    {
      /* Rebias from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.
	     NOTE(review): 0x7F here encodes a finite single exponent,
	     not infinity (0xFF).  The branch is unreachable for inputs
	     accepted by is_double_a_single, which bounds exp, but the
	     value looks suspect — confirm before relying on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 of the 52 fraction bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7788 #endif /* BFD_HOST_64_BIT */
7789
/* The kind of literal that move_or_literal_pool is asked to load: a
   Thumb integer load, an ARM integer load, or a vector (VFP/Neon)
   load.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};

static void do_vfp_nsyn_opcode (const char *);
7798
7799 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7800 Determine whether it can be performed with a move instruction; if
7801 it can, convert inst.instruction to that move instruction and
7802 return TRUE; if it can't, convert inst.instruction to a literal-pool
7803 load and return FALSE. If this is not a valid thing to do in the
7804 current context, set inst.error and return TRUE.
7805
7806 inst.operands[i] describes the destination register. */
7807
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* The pseudo-op is only valid on load instructions: check the
     encoding's load bit.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Collect the bignum's littlenums into a host integer V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value as-is, then its complement (which a
		     MVN would re-invert at run time).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the 12-bit modified immediate over the
			 i:imm3:imm8 fields.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      /* Try a MOV with a rotated 8-bit immediate, then a MVN
		 of the complement.  */
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC)
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.reloc.exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      /* Try a VMOV-style modified immediate; if that fails,
		 retry with the value inverted (turning VMOV into VMVN
		 or vice versa).  */
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move was possible: fall back to a PC-relative load from the
     literal pool.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8047
8048 /* inst.operands[i] was set up by parse_address. Encode it into an
8049 ARM-format instruction. Reject all forms which cannot be encoded
8050 into a coprocessor load/store instruction. If wb_ok is false,
8051 reject use of writeback; if unind_ok is false, reject use of
8052 unindexed addressing. If reloc_override is not 0, use it instead
8053 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8054 (in which case it is preserved). */
8055
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* A "=constant" pseudo operand: try to turn it into a vmov or a
	 literal-pool load.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit option value occupies the offset
	 field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_UNUSED ? 0 : 1, /* no-op guard removed */ 0)
    {
      ;
    }
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* No override and no group relocation already selected: use the
	 default coprocessor offset relocation for the current mode.  */
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8124
8125 /* Functions for instruction encoding, sorted by sub-architecture.
8126 First some generics; their names are taken from the conventional
8127 bit positions for register arguments in ARM format instructions. */
8128
static void
do_noargs (void)
{
  /* Instructions taking no operands: the opcode from insns[] is already
     complete, so there is nothing to encode.  */
}
8133
8134 static void
8135 do_rd (void)
8136 {
8137 inst.instruction |= inst.operands[0].reg << 12;
8138 }
8139
8140 static void
8141 do_rd_rm (void)
8142 {
8143 inst.instruction |= inst.operands[0].reg << 12;
8144 inst.instruction |= inst.operands[1].reg;
8145 }
8146
8147 static void
8148 do_rm_rn (void)
8149 {
8150 inst.instruction |= inst.operands[0].reg;
8151 inst.instruction |= inst.operands[1].reg << 16;
8152 }
8153
8154 static void
8155 do_rd_rn (void)
8156 {
8157 inst.instruction |= inst.operands[0].reg << 12;
8158 inst.instruction |= inst.operands[1].reg << 16;
8159 }
8160
8161 static void
8162 do_rn_rd (void)
8163 {
8164 inst.instruction |= inst.operands[0].reg << 16;
8165 inst.instruction |= inst.operands[1].reg << 12;
8166 }
8167
8168 static void
8169 do_tt (void)
8170 {
8171 inst.instruction |= inst.operands[0].reg << 8;
8172 inst.instruction |= inst.operands[1].reg << 16;
8173 }
8174
8175 static bfd_boolean
8176 check_obsolete (const arm_feature_set *feature, const char *msg)
8177 {
8178 if (ARM_CPU_IS_ANY (cpu_variant))
8179 {
8180 as_tsktsk ("%s", msg);
8181 return TRUE;
8182 }
8183 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8184 {
8185 as_bad ("%s", msg);
8186 return TRUE;
8187 }
8188
8189 return FALSE;
8190 }
8191
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      /* Rn must differ from both Rd and Rm.  */
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd in bits 15:12, Rm in bits 3:0, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8215
8216 static void
8217 do_rd_rn_rm (void)
8218 {
8219 inst.instruction |= inst.operands[0].reg << 12;
8220 inst.instruction |= inst.operands[1].reg << 16;
8221 inst.instruction |= inst.operands[2].reg;
8222 }
8223
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Only a plain [Rn] address with a zero (or absent) offset is
     acceptable for operand 2.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Rm in bits 3:0, Rd in bits 15:12, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8236
8237 static void
8238 do_imm0 (void)
8239 {
8240 inst.instruction |= inst.operands[0].imm;
8241 }
8242
8243 static void
8244 do_rd_cpaddr (void)
8245 {
8246 inst.instruction |= inst.operands[0].reg << 12;
8247 encode_arm_cp_address (1, TRUE, TRUE, 0);
8248 }
8249
8250 /* ARM instructions, in alphabetical order by function name (except
8251 that wrapper functions appear immediately after the function they
8252 wrap). */
8253
8254 /* This is a pseudo-op of the form "adr rd, label" to be converted
8255 into a relative address of the form "add rd, pc, #label-.-8". */
8256
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* In ARM state PC reads as the instruction address plus 8, so bias the
     addend to compensate.  */
  inst.reloc.exp.X_add_number -= 8;
}
8268
8269 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8270 into a relative address of the form:
8271 add rd, pc, #low(label-.-8)"
8272 add rd, rd, #high(label-.-8)" */
8273
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state PC reads as the instruction address plus 8, so bias the
     addend to compensate.  */
  inst.reloc.exp.X_add_number -= 8;
}
8286
8287 static void
8288 do_arit (void)
8289 {
8290 if (!inst.operands[1].present)
8291 inst.operands[1].reg = inst.operands[0].reg;
8292 inst.instruction |= inst.operands[0].reg << 12;
8293 inst.instruction |= inst.operands[1].reg << 16;
8294 encode_arm_shifter_operand (2);
8295 }
8296
8297 static void
8298 do_barrier (void)
8299 {
8300 if (inst.operands[0].present)
8301 inst.instruction |= inst.operands[0].imm;
8302 else
8303 inst.instruction |= 0xf;
8304 }
8305
8306 static void
8307 do_bfc (void)
8308 {
8309 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8310 constraint (msb > 32, _("bit-field extends past end of register"));
8311 /* The instruction encoding stores the LSB and MSB,
8312 not the LSB and width. */
8313 inst.instruction |= inst.operands[0].reg << 12;
8314 inst.instruction |= inst.operands[1].imm << 7;
8315 inst.instruction |= (msb - 1) << 16;
8316 }
8317
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].imm << 7;	/* LSB.  */
  inst.instruction |= (msb - 1) << 16;			/* MSB.  */
}
8337
8338 static void
8339 do_bfx (void)
8340 {
8341 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8342 _("bit-field extends past end of register"));
8343 inst.instruction |= inst.operands[0].reg << 12;
8344 inst.instruction |= inst.operands[1].reg;
8345 inst.instruction |= inst.operands[2].imm << 7;
8346 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8347 }
8348
8349 /* ARM V5 breakpoint instruction (argument parse)
8350 BKPT <16 bit unsigned immediate>
8351 Instruction is not conditional.
8352 The bit pattern given in insns[] has the COND_ALWAYS condition,
8353 and it is an error if the caller tried to override that. */
8354
8355 static void
8356 do_bkpt (void)
8357 {
8358 /* Top 12 of 16 bits to bits 19:8. */
8359 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8360
8361 /* Bottom 4 of 16 bits to bits 3:0. */
8362 inst.instruction |= inst.operands[0].imm & 0xf;
8363 }
8364
/* Set up the PC-relative relocation for a branch operand, honouring an
   explicit (plt)/(tlscall) suffix when present, otherwise using
   DEFAULT_RELOC.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8381
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later uses the JUMP relocation so the linker can see the
     branch kind; older objects use the plain BRANCH relocation.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8392
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* For EABI v4+, an unconditional BL gets the CALL relocation; a
	 conditional BL gets JUMP, as it cannot be converted to BLX.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8408
8409 /* ARM V5 branch-link-exchange instruction (argument parse)
8410 BLX <target_addr> ie BLX(1)
8411 BLX{<condition>} <Rm> ie BLX(2)
8412 Unfortunately, there are two different opcodes for this mnemonic.
8413 So, the insns[].value is not used, and the code here zaps values
8414 into inst.instruction.
8415 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8416
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the table opcode with the immediate-form BLX.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8440
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Pre-v4 EABI objects (and non-ELF objects) never get the relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8464
8465
8466 /* ARM v5TEJ. Jump to Jazelle code. */
8467
8468 static void
8469 do_bxj (void)
8470 {
8471 if (inst.operands[0].reg == REG_PC)
8472 as_tsktsk (_("use of r15 in bxj is not really useful"));
8473
8474 inst.instruction |= inst.operands[0].reg;
8475 }
8476
8477 /* Co-processor data operation:
8478 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8479 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8480 static void
8481 do_cdp (void)
8482 {
8483 inst.instruction |= inst.operands[0].reg << 8;
8484 inst.instruction |= inst.operands[1].imm << 20;
8485 inst.instruction |= inst.operands[2].reg << 12;
8486 inst.instruction |= inst.operands[3].reg << 16;
8487 inst.instruction |= inst.operands[4].reg;
8488 inst.instruction |= inst.operands[5].imm << 5;
8489 }
8490
8491 static void
8492 do_cmp (void)
8493 {
8494 inst.instruction |= inst.operands[0].reg << 16;
8495 encode_arm_shifter_operand (1);
8496 }
8497
8498 /* Transfer between coprocessor and ARM registers.
8499 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8500 MRC2
8501 MCR{cond}
8502 MCR2
8503
8504 No special properties. */
8505
/* Description of a coprocessor register access that is deprecated or
   obsoleted on some architecture versions.  Matched against the operands
   of MRC/MCR in do_co_reg.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;		/* Coprocessor number.  */
  int opc1;
  unsigned crn;
  unsigned crm;
  int opc2;
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for the deprecated case.  */
  const char *obs_msg;		/* Diagnostic for the obsolete case.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8546
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the operands name a coprocessor register whose access is
     deprecated on the selected CPU (see deprecated_coproc_regs).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* cp in 11:8, opc1 in 23:21, Rd in 15:12, CRn in 19:16, CRm in 3:0,
     opc2 in 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8596
8597 /* Transfer between coprocessor register and pair of ARM registers.
8598 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8599 MCRR2
8600 MRRC{cond}
8601 MRRC2
8602
8603 Two XScale instructions are special cases of these:
8604
8605 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8606 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8607
8608 Result unpredictable if Rd or Rn is R15. */
8609
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* cp in 11:8, opcode in 7:4, Rd in 15:12, Rn in 19:16, CRm in 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8635
8636 static void
8637 do_cpsi (void)
8638 {
8639 inst.instruction |= inst.operands[0].imm << 6;
8640 if (inst.operands[1].present)
8641 {
8642 inst.instruction |= CPSI_MMOD;
8643 inst.instruction |= inst.operands[1].imm;
8644 }
8645 }
8646
8647 static void
8648 do_dbg (void)
8649 {
8650 inst.instruction |= inst.operands[0].imm;
8651 }
8652
8653 static void
8654 do_div (void)
8655 {
8656 unsigned Rd, Rn, Rm;
8657
8658 Rd = inst.operands[0].reg;
8659 Rn = (inst.operands[1].present
8660 ? inst.operands[1].reg : Rd);
8661 Rm = inst.operands[2].reg;
8662
8663 constraint ((Rd == REG_PC), BAD_PC);
8664 constraint ((Rn == REG_PC), BAD_PC);
8665 constraint ((Rm == REG_PC), BAD_PC);
8666
8667 inst.instruction |= Rd << 16;
8668 inst.instruction |= Rn << 0;
8669 inst.instruction |= Rm << 8;
8670 }
8671
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition so following instructions can be
	 validated against the IT block state.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8688
8689 /* If there is only one register in the register list,
8690 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns 0 when no bit is set, making I equal to -1; that case
     must be rejected before the shift below, since shifting by a
     negative amount is undefined behaviour.  */
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8697
/* Common encoder for LDM/STM and the PUSH/POP mnemonics.  Diagnoses the
   UNPREDICTABLE writeback combinations and, for single-register PUSH/POP,
   switches to the A2 (single-register transfer) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     form (LDM/STM type 2 or 3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field and rebuild the opcode as a
	 single-register STR/LDR with the register in Rd.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8748
/* Plain LDM/STM: never rewritten into the single-register PUSH/POP
   encoding.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8754
8755 /* ARMv5TE load-consecutive (argument parse)
8756 Mode is like LDRH.
8757
8758 LDRccD R, mode
8759 STRccD R, mode. */
8760
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  /* The second register, when given explicitly, must be Rd+1.  */
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8796
static void
do_ldrex (void)
{
  /* Only a plain pre-indexed [Rn] with no offset, shift or writeback is
     acceptable.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): this PC check is unreachable — the constraint above
     already rejects REG_PC with BAD_ADDR_MODE.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8828
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* Rd in 15:12; the base register (operand 2) in 19:16.  The second
     transfer register is implicitly Rd+1 and is not encoded.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8844
8845 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8846 which is not a multiple of four is UNPREDICTABLE. */
8847 static void
8848 check_ldr_r15_aligned (void)
8849 {
8850 constraint (!(inst.operands[1].immisreg)
8851 && (inst.operands[0].reg == REG_PC
8852 && inst.operands[1].reg == REG_PC
8853 && (inst.reloc.exp.X_add_number & 0x3)),
8854 _("ldr to register 15 must be 4-byte alligned"));
8855 }
8856
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare constant operand may be turned into a mov or satisfied from
     the literal pool.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8867
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only tolerated when it is [Rn] with a zero
	 offset, which is rewritten as post-indexed with writeback.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8886
8887 /* Halfword and signed-byte load/store operations. */
8888
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare constant operand may be turned into a mov or satisfied from
     the literal pool (mode-3 addressing for halfword/signed-byte).  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8899
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only tolerated when it is [Rn] with a zero
	 offset, which is rewritten as post-indexed with writeback.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8918
8919 /* Co-processor register load/store.
8920 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8921 static void
8922 do_lstc (void)
8923 {
8924 inst.instruction |= inst.operands[0].reg << 8;
8925 inst.instruction |= inst.operands[1].reg << 12;
8926 encode_arm_cp_address (2, TRUE, TRUE, 0);
8927 }
8928
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Rd in 19:16, Rm in 3:0, Rs in 11:8, Rn in 15:12.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8943
8944 static void
8945 do_mov (void)
8946 {
8947 inst.instruction |= inst.operands[0].reg << 12;
8948 encode_arm_shifter_operand (1);
8949 }
8950
8951 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8952 static void
8953 do_mov16 (void)
8954 {
8955 bfd_vma imm;
8956 bfd_boolean top;
8957
8958 top = (inst.instruction & 0x00400000) != 0;
8959 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8960 _(":lower16: not allowed this instruction"));
8961 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8962 _(":upper16: not allowed instruction"));
8963 inst.instruction |= inst.operands[0].reg << 12;
8964 if (inst.reloc.type == BFD_RELOC_UNUSED)
8965 {
8966 imm = inst.reloc.exp.X_add_number;
8967 /* The value is in two pieces: 0:11, 16:19. */
8968 inst.instruction |= (imm & 0x00000fff);
8969 inst.instruction |= (imm & 0x0000f000) << 4;
8970 }
8971 }
8972
/* Handle the VFP non-unified-syntax forms of MRS.  Returns SUCCESS when
   the instruction was handled here, FAIL when the caller should encode
   it as a core MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* Destination is APSR_nzcv: only FPSCR (register 1) may be read,
	 and the whole thing is encoded as FMSTAT.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8991
8992 static int
8993 do_vfp_nsyn_msr (void)
8994 {
8995 if (inst.operands[0].isvec)
8996 do_vfp_nsyn_opcode ("fmxr");
8997 else
8998 return FAIL;
8999
9000 return SUCCESS;
9001 }
9002
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9026
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb rejects SP/PC via reject_bad_reg; ARM only rejects PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9045
static void
do_mrs (void)
{
  unsigned br;

  /* Try the VFP pseudo-forms (FMSTAT/FMRX) first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* A banked-register operand; validate its encoding bits.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9074
9075 /* Two possible forms:
9076 "{C|S}PSR_<field>, Rm",
9077 "{C|S}PSR_f, #expression". */
9078
static void
do_msr (void)
{
  /* Try the VFP pseudo-form (FMXR) first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: set the I bit and let the relocation machinery
	 encode the rotated immediate.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9095
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Two-operand form: Rs defaults to Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  /* Rd in 19:16, Rm in 3:0, Rs in 11:8.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Before v6 the result is UNPREDICTABLE when Rd == Rm.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9111
9112 /* Long Multiply Parser
9113 UMULL RdLo, RdHi, Rm, Rs
9114 SMULL RdLo, RdHi, Rm, Rs
9115 UMLAL RdLo, RdHi, Rm, Rs
9116 SMLAL RdLo, RdHi, Rm, Rs. */
9117
static void
do_mull (void)
{
  /* RdLo in 15:12, RdHi in 19:16, Rm in 3:0, Rs in 11:8.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9136
static void
do_nop (void)
{
  /* When an option is supplied, or on v6K and later, use the hint
     encoding; otherwise the opcode from insns[] is left unchanged.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9150
9151 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9152 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9153 Condition defaults to COND_ALWAYS.
9154 Error if Rd, Rn or Rm are R15. */
9155
static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  /* Optional LSL #imm applied to Rm.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9165
9166 /* ARM V6 PKHTB (Argument Parse). */
9167
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped source operands.  */
      inst.instruction &= 0xfff00010;	/* Keep opcode bits, clear regs.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9188
9189 /* ARMv5TE: Preload-Cache
9190 MP Extensions: Preload for write
9191
9192 PLD(W) <addr_mode>
9193
9194 Syntactically, like LDR with B=1, W=0, L=1. */
9195
static void
do_pld (void)
{
  /* Only a plain pre-indexed, no-writeback address is valid.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9209
9210 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9225
/* Encode PUSH/POP as a load/store-multiple with a synthesised
   writeback SP base register.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Move the register list to slot 1 and fabricate SP! in slot 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9238
9239 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9240 word at the specified address and the following word
9241 respectively.
9242 Unconditionally executed.
9243 Error if Rn is R15. */
9244
static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rn.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9252
9253 /* ARM V6 ssat (argument parse). */
9254
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT's saturation position is encoded as value - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9265
9266 /* ARM V6 usat (argument parse). */
9267
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, USAT encodes the saturation position directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9278
9279 /* ARM V6 ssat16 (argument parse). */
9280
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As for SSAT, the position immediate is encoded off by one.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9288
/* ARM V6 usat16 (argument parse).  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9296
9297 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9298 preserving the other bits.
9299
9300 setend <endian_specifier>, where <endian_specifier> is either
9301 BE or LE. */
9302
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Set the E bit (bit 9) when the operand requests it.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9313
9314 static void
9315 do_shift (void)
9316 {
9317 unsigned int Rm = (inst.operands[1].present
9318 ? inst.operands[1].reg
9319 : inst.operands[0].reg);
9320
9321 inst.instruction |= inst.operands[0].reg << 12;
9322 inst.instruction |= Rm;
9323 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9324 {
9325 inst.instruction |= inst.operands[2].reg << 8;
9326 inst.instruction |= SHIFT_BY_REG;
9327 /* PR 12854: Error on extraneous shifts. */
9328 constraint (inst.operands[2].shifted,
9329 _("extraneous shift as part of operand to shift insn"));
9330 }
9331 else
9332 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9333 }
9334
/* SMC: the immediate is emitted via a dedicated fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9341
/* HVC: the immediate is emitted via a dedicated fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9348
/* SWI/SVC: the immediate is emitted via a dedicated fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9355
/* SETPAN (PAN extension), ARM encoding: 1-bit operand at bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9364
/* SETPAN (PAN extension), Thumb encoding: operand at bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9373
9374 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9375 SMLAxy{cond} Rd,Rm,Rs,Rn
9376 SMLAWy{cond} Rd,Rm,Rs,Rn
9377 Error if any register is R15. */
9378
static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */
  inst.instruction |= inst.operands[3].reg << 12;	/* Rn (accumulator).  */
}
9387
9388 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9389 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9390 Error if any register is R15.
9391 Warning if Rdlo == Rdhi. */
9392
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9404
9405 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9406 SMULxy{cond} Rd,Rm,Rs
9407 Error if any register is R15. */
9408
static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */
}
9416
9417 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9418 the same for both ARM and Thumb-2. */
9419
9420 static void
9421 do_srs (void)
9422 {
9423 int reg;
9424
9425 if (inst.operands[0].present)
9426 {
9427 reg = inst.operands[0].reg;
9428 constraint (reg != REG_SP, _("SRS base register must be r13"));
9429 }
9430 else
9431 reg = REG_SP;
9432
9433 inst.instruction |= reg << 16;
9434 inst.instruction |= inst.operands[1].imm;
9435 if (inst.operands[0].writeback || inst.operands[1].writeback)
9436 inst.instruction |= WRITE_BACK;
9437 }
9438
9439 /* ARM V6 strex (argument parse). */
9440
static void
do_strex (void)
{
  /* The address must be a plain [Rn] — no offset, index, writeback or
     shift — and Rn must not be the PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register may not overlap the stored value or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9464
/* Thumb STREXB/STREXH: same address-mode and overlap checks as
   do_strex, then the Thumb Rm/Rd/Rn field layout.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9479
/* STREXD: stores an even/odd register pair exclusively.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Status register must not overlap either stored register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rt (pair base).  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
9501
9502 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* Status register must not overlap value or base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9511
/* Thumb variant of do_stlex; uses the Thumb Rm/Rd/Rn field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9520
9521 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9522 extends it to 32-bits, and adds the result to a value in another
9523 register. You can specify a rotation by 0, 8, 16, or 24 bits
9524 before extracting the 16-bit value.
9525 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9526 Condition defaults to COND_ALWAYS.
9527 Error if any register uses R15. */
9528
static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  /* Rotation field (assumed pre-encoded by the parser to fit << 10).  */
  inst.instruction |= inst.operands[3].imm << 10;
}
9537
9538 /* ARM V6 SXTH.
9539
9540 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9541 Condition defaults to COND_ALWAYS.
9542 Error if any register uses R15. */
9543
static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  /* Rotation field (assumed pre-encoded by the parser to fit << 10).  */
  inst.instruction |= inst.operands[2].imm << 10;
}
9551 \f
9552 /* VFP instructions. In a logical order: SP variant first, monad
9553 before dyad, arithmetic then move then load/store. */
9554
/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9561
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9569
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9575
/* Conversion with double-precision destination, SP source: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9582
/* Conversion with SP destination, double-precision source: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9589
/* Transfer: ARM core register (bits 12-15) from VFP Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9596
/* Transfer: two ARM core registers from a pair of consecutive VFP SP
   registers.  Operand 2's imm is the parsed SP-register count.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9606
/* Transfer: VFP Sn from ARM core register (bits 12-15).  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9613
/* Transfer: pair of consecutive VFP SP registers from two ARM core
   registers.  Operand 0's imm is the parsed SP-register count.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9623
/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9630
/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9637
9638
/* Common encoder for single-precision VFP load/store multiple.
   Only the IA variant may omit base-register writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;	/* Base register.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;		/* Register count.  */
}
9651
9652 static void
9653 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9654 {
9655 int count;
9656
9657 if (inst.operands[0].writeback)
9658 inst.instruction |= WRITE_BACK;
9659 else
9660 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9661 _("this addressing mode requires base-register writeback"));
9662
9663 inst.instruction |= inst.operands[0].reg << 16;
9664 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9665
9666 count = inst.operands[1].imm << 1;
9667 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9668 count += 1;
9669
9670 inst.instruction |= count;
9671 }
9672
/* SP load/store multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9678
/* SP load/store multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9684
/* DP load/store multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9690
/* DP load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9696
/* Extended (X) format load/store multiple, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9702
/* Extended (X) format load/store multiple, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9708
/* Double-precision two-register form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9715
/* Double-precision two-register form: Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9722
/* Double-precision two-register form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9729
/* Double-precision three-register form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9737
/* Double-precision single-register form: Dd only.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9743
/* Double-precision three-register form: Dm, Dd, Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9751
9752 /* VFPv3 instructions. */
/* SP immediate form: the 8-bit constant is split, high nibble to
   bits 16-19 and low nibble to bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9760
/* DP immediate form: same split-nibble encoding as the SP variant.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9768
/* Encode the fixed-point fraction-bits immediate for VCVT-style
   conversions.  The encoded value is SRCSIZE minus the operand, with
   its low bit at instruction bit 5 and the rest in bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9792
/* SP fixed-point conversion, 16-bit source size.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9799
/* DP fixed-point conversion, 16-bit source size.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9806
/* SP fixed-point conversion, 32-bit source size.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9813
/* DP fixed-point conversion, 32-bit source size.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9820 \f
9821 /* FPA instructions. Also in a logical order. */
9822
/* FPA compare: operand 0 in bits 16-19, operand 1 in bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9829
/* FPA load/store multiple.  Operand 1's imm is the register count
   (1-4), encoded via the CP_T_X/CP_T_Y bits.  The "ea"/"fd" stack
   forms are emulated because the FPA cannot really index.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer takes 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack writeback becomes a post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9868 \f
9869 /* iWMMXt instructions: strictly in alphabetical order. */
9870
/* TANDC/TORC/...: the destination must be written as r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9876
/* TEXTRC: destination register plus an immediate in bits 0-7.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
9883
/* TEXTRM: destination, source wR, and an element-select immediate.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
9891
/* TINSR: destination wR in bits 16-19, source core register in
   bits 12-15, element-select immediate in the low bits.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9899
/* TMIA: accumulator in bits 5-8, multiplicands in bits 0-3/12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9907
/* WALIGNI: three wR registers plus a 3-bit alignment immediate at
   bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
9916
/* WMERGE: three wR registers plus an immediate at bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
9925
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  /* The single source register fills both source fields.  */
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9934
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword forms use the scaled (_S2)
   coprocessor-offset relocation, in its ARM or Thumb flavour.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
9946
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form must be unconditional and is encoded
	 with the 0xF condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9960
/* WLDRD/WSTRD.  On iWMMXt2 a register-offset address form exists; in
   that case the coprocessor encoding is rewritten by hand.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Strip the coprocessor-form fields, then rebuild the
	 unconditional (0xF) register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9983
/* WSHUFH: the 8-bit shuffle immediate is split, high nibble to
   bits 20-23 and low nibble to bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
9992
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  /* The same register fills all three register fields.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10001
/* Encode an iWMMXt shift/rotate that takes either a third register or
   (iWMMXt2 only) a 5-bit immediate.  A zero immediate has no direct
   encoding and is rewritten as an equivalent full-width rotate.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Rewrite the opcode (bits 20-23) according to element size.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  Then encode the unconditional (0xF) form with
       the immediate split across bit 8 and bits 0-3.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10051 \f
10052 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10053 operations first, then control, shift, and load/store. */
10054
10055 /* Insns like "foo X,Y,Z". */
10056
static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10064
10065 /* Insns like "foo W,X,Y,Z".
10066 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10067
static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;	/* W (MVAX).  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
10076
10077 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit in the opcode; only the MVDX source is encoded.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10083
10084 /* Maverick shift immediate instructions.
10085 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10086 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10087
10088 static void
10089 do_mav_shift (void)
10090 {
10091 int imm = inst.operands[2].imm;
10092
10093 inst.instruction |= inst.operands[0].reg << 12;
10094 inst.instruction |= inst.operands[1].reg << 16;
10095
10096 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10097 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10098 Bit 4 should be 0. */
10099 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10100
10101 inst.instruction |= imm;
10102 }
10103 \f
10104 /* XScale instructions. Also sorted arithmetic before move. */
10105
10106 /* Xscale multiply-accumulate (argument parse)
10107 MIAcc acc0,Rm,Rs
10108 MIAPHcc acc0,Rm,Rs
10109 MIAxycc acc0,Rm,Rs. */
10110
static void
do_xsc_mia (void)
{
  /* acc0 is implicit; encode Rm and Rs only.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10117
10118 /* Xscale move-accumulator-register (argument parse)
10119
10120 MARcc acc0,RdLo,RdHi. */
10121
static void
do_xsc_mar (void)
{
  /* acc0 is implicit; encode RdLo and RdHi only.  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10128
10129 /* Xscale move-register-accumulator (argument parse)
10130
10131 MRAcc RdLo,RdHi,acc0. */
10132
static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must be distinct; acc0 is implicit.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
10140 \f
10141 /* Encoding functions relevant only to Thumb. */
10142
10143 /* inst.operands[i] is a shifted-register operand; encode
10144 it into inst.instruction in the format used by Thumb32. */
10145
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded with the ROR shift type and a zero amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Shift 0 degrades to LSL #0; 32 (LSR/ASR only, per the
	 constraint above) is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift amount bits 2-4 go to insn bits 12-14, bits 0-1 to
	 insn bits 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10177
10178
10179 /* inst.operands[i] was set up by parse_address. Encode it into a
10180 Thumb32 format load or store instruction. Reject forms that cannot
10181 be used with such instructions. If is_t is true, reject forms that
10182 cannot be used with a T instruction; if is_d is true, reject forms
10183 that cannot be used with a D instruction. If it is a store insn,
10184 reject PC in Rn. */
10185
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #imm}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* LSL amount is limited to 0-3, encoded at bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed immediate form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      /* D instructions use a different P/W bit layout.  */
      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10257
10258 /* Table of Thumb instructions which exist in both 16- and 32-bit
10259 encodings (the latter only in post-V6T2 cores). The index is the
10260 value used in the insns table below. When there is more than one
10261 possible 16-bit encoding for the instruction, this table always
10262 holds variant (1).
10263 Also contains several pseudo-instructions used during relaxation. */
/* Columns: mnemonic tail (pasted onto T_MNEM below), 16-bit opcode,
   32-bit opcode.  An all-ones 32-bit entry marks a mnemonic with no
   wide encoding (e.g. muls).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)
10344
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* The table is expanded once per column: first as an enum of T_MNEM_*
   codes, then as the two parallel opcode arrays indexed by those codes.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Narrow (16-bit) opcode templates, indexed by T_MNEM_* code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Wide (32-bit) opcode templates, indexed by T_MNEM_* code.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Bit 20 of the wide template is the S (set-flags) bit.  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10363
10364 /* Thumb instruction encoders, in alphabetical order. */
10365
10366 /* ADDW or SUBW. */
10367
10368 static void
10369 do_t_add_sub_w (void)
10370 {
10371 int Rd, Rn;
10372
10373 Rd = inst.operands[0].reg;
10374 Rn = inst.operands[1].reg;
10375
10376 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10377 is the SP-{plus,minus}-immediate form of the instruction. */
10378 if (Rn == REG_SP)
10379 constraint (Rd == REG_PC, BAD_PC);
10380 else
10381 reject_bad_reg (Rd);
10382
10383 inst.instruction |= (Rn << 16) | (Rd << 8);
10384 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10385 }
10386
10387 /* Parse an add or subtract instruction. We get here with inst.instruction
10388 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10389
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* Writing PC is treated as a branch for IT-block purposes.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* 'adds'/'subs' are the flag-setting variants.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block the narrow forms do not set flags; outside
	 one they do, hence the choice depends on both.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Keep any ALU group reloc already parsed; otherwise
		     use the generic Thumb ADD reloc.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  /* Emit a wide encoding when forced (.w) or when no narrow
	     opcode was selected above.  */
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     may target PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so that Rn holds the operand that
			 differs from Rd.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit encodings are available, and
	 the flag-setting mnemonics are not accepted.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10599
10600 static void
10601 do_t_adr (void)
10602 {
10603 unsigned Rd;
10604
10605 Rd = inst.operands[0].reg;
10606 reject_bad_reg (Rd);
10607
10608 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10609 {
10610 /* Defer to section relaxation. */
10611 inst.relax = inst.instruction;
10612 inst.instruction = THUMB_OP16 (inst.instruction);
10613 inst.instruction |= Rd << 4;
10614 }
10615 else if (unified_syntax && inst.size_req != 2)
10616 {
10617 /* Generate a 32-bit opcode. */
10618 inst.instruction = THUMB_OP32 (inst.instruction);
10619 inst.instruction |= Rd << 8;
10620 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10621 inst.reloc.pc_rel = 1;
10622 }
10623 else
10624 {
10625 /* Generate a 16-bit opcode. */
10626 inst.instruction = THUMB_OP16 (inst.instruction);
10627 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10628 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10629 inst.reloc.pc_rel = 1;
10630
10631 inst.instruction |= Rd << 4;
10632 }
10633 }
10634
10635 /* Arithmetic instructions for which there is just one 16-bit
10636 instruction encoding, and it allows only two low registers.
10637 For maximal compatibility with ARM syntax, we allow three register
10638 operands even when Thumb-32 instructions are not available, as long
10639 as the first two are identical. For instance, both "sbc r0,r1" and
10640 "sbc r0,r0,r1" are allowed. */
10641 static void
10642 do_t_arit3 (void)
10643 {
10644 int Rd, Rs, Rn;
10645
10646 Rd = inst.operands[0].reg;
10647 Rs = (inst.operands[1].present
10648 ? inst.operands[1].reg /* Rd, Rs, foo */
10649 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10650 Rn = inst.operands[2].reg;
10651
10652 reject_bad_reg (Rd);
10653 reject_bad_reg (Rs);
10654 if (inst.operands[2].isreg)
10655 reject_bad_reg (Rn);
10656
10657 if (unified_syntax)
10658 {
10659 if (!inst.operands[2].isreg)
10660 {
10661 /* For an immediate, we always generate a 32-bit opcode;
10662 section relaxation will shrink it later if possible. */
10663 inst.instruction = THUMB_OP32 (inst.instruction);
10664 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10665 inst.instruction |= Rd << 8;
10666 inst.instruction |= Rs << 16;
10667 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10668 }
10669 else
10670 {
10671 bfd_boolean narrow;
10672
10673 /* See if we can do this with a 16-bit instruction. */
10674 if (THUMB_SETS_FLAGS (inst.instruction))
10675 narrow = !in_it_block ();
10676 else
10677 narrow = in_it_block ();
10678
10679 if (Rd > 7 || Rn > 7 || Rs > 7)
10680 narrow = FALSE;
10681 if (inst.operands[2].shifted)
10682 narrow = FALSE;
10683 if (inst.size_req == 4)
10684 narrow = FALSE;
10685
10686 if (narrow
10687 && Rd == Rs)
10688 {
10689 inst.instruction = THUMB_OP16 (inst.instruction);
10690 inst.instruction |= Rd;
10691 inst.instruction |= Rn << 3;
10692 return;
10693 }
10694
10695 /* If we get here, it can't be done in 16 bits. */
10696 constraint (inst.operands[2].shifted
10697 && inst.operands[2].immisreg,
10698 _("shift must be constant"));
10699 inst.instruction = THUMB_OP32 (inst.instruction);
10700 inst.instruction |= Rd << 8;
10701 inst.instruction |= Rs << 16;
10702 encode_thumb32_shifted_operand (2);
10703 }
10704 }
10705 else
10706 {
10707 /* On its face this is a lie - the instruction does set the
10708 flags. However, the only supported mnemonic in this mode
10709 says it doesn't. */
10710 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10711
10712 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10713 _("unshifted register required"));
10714 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10715 constraint (Rd != Rs,
10716 _("dest and source1 must be the same register"));
10717
10718 inst.instruction = THUMB_OP16 (inst.instruction);
10719 inst.instruction |= Rd;
10720 inst.instruction |= Rn << 3;
10721 }
10722 }
10723
10724 /* Similarly, but for instructions where the arithmetic operation is
10725 commutative, so we can allow either of them to be different from
10726 the destination operand in a 16-bit instruction. For instance, all
10727 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10728 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 coincide with the destination in the narrow form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10824
10825 static void
10826 do_t_bfc (void)
10827 {
10828 unsigned Rd;
10829 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10830 constraint (msb > 32, _("bit-field extends past end of register"));
10831 /* The instruction encoding stores the LSB and MSB,
10832 not the LSB and width. */
10833 Rd = inst.operands[0].reg;
10834 reject_bad_reg (Rd);
10835 inst.instruction |= Rd << 8;
10836 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10837 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10838 inst.instruction |= msb - 1;
10839 }
10840
10841 static void
10842 do_t_bfi (void)
10843 {
10844 int Rd, Rn;
10845 unsigned int msb;
10846
10847 Rd = inst.operands[0].reg;
10848 reject_bad_reg (Rd);
10849
10850 /* #0 in second position is alternative syntax for bfc, which is
10851 the same instruction but with REG_PC in the Rm field. */
10852 if (!inst.operands[1].isreg)
10853 Rn = REG_PC;
10854 else
10855 {
10856 Rn = inst.operands[1].reg;
10857 reject_bad_reg (Rn);
10858 }
10859
10860 msb = inst.operands[2].imm + inst.operands[3].imm;
10861 constraint (msb > 32, _("bit-field extends past end of register"));
10862 /* The instruction encoding stores the LSB and MSB,
10863 not the LSB and width. */
10864 inst.instruction |= Rd << 8;
10865 inst.instruction |= Rn << 16;
10866 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10867 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10868 inst.instruction |= msb - 1;
10869 }
10870
10871 static void
10872 do_t_bfx (void)
10873 {
10874 unsigned Rd, Rn;
10875
10876 Rd = inst.operands[0].reg;
10877 Rn = inst.operands[1].reg;
10878
10879 reject_bad_reg (Rd);
10880 reject_bad_reg (Rn);
10881
10882 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10883 _("bit-field extends past end of register"));
10884 inst.instruction |= Rd << 8;
10885 inst.instruction |= Rn << 16;
10886 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10887 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10888 inst.instruction |= inst.operands[3].imm - 1;
10889 }
10890
10891 /* ARM V5 Thumb BLX (argument parse)
10892 BLX <target_addr> which is BLX(1)
10893 BLX <Rm> which is BLX(2)
10894 Unfortunately, there are two different opcodes for this mnemonic.
10895 So, the insns[].value is not used, and the code here zaps values
10896 into inst.instruction.
10897
10898 ??? How to take advantage of the additional two bits of displacement
10899 available in Thumb32 mode? Need new relocation? */
10900
10901 static void
10902 do_t_blx (void)
10903 {
10904 set_it_insn_type_last ();
10905
10906 if (inst.operands[0].isreg)
10907 {
10908 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10909 /* We have a register, so this is BLX(2). */
10910 inst.instruction |= inst.operands[0].reg << 3;
10911 }
10912 else
10913 {
10914 /* No register. This must be BLX(1). */
10915 inst.instruction = 0xf000e800;
10916 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10917 }
10918 }
10919
10920 static void
10921 do_t_branch (void)
10922 {
10923 int opcode;
10924 int cond;
10925 int reloc;
10926
10927 cond = inst.cond;
10928 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10929
10930 if (in_it_block ())
10931 {
10932 /* Conditional branches inside IT blocks are encoded as unconditional
10933 branches. */
10934 cond = COND_ALWAYS;
10935 }
10936 else
10937 cond = inst.cond;
10938
10939 if (cond != COND_ALWAYS)
10940 opcode = T_MNEM_bcond;
10941 else
10942 opcode = inst.instruction;
10943
10944 if (unified_syntax
10945 && (inst.size_req == 4
10946 || (inst.size_req != 2
10947 && (inst.operands[0].hasreloc
10948 || inst.reloc.exp.X_op == O_constant))))
10949 {
10950 inst.instruction = THUMB_OP32(opcode);
10951 if (cond == COND_ALWAYS)
10952 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10953 else
10954 {
10955 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
10956 _("selected architecture does not support "
10957 "wide conditional branch instruction"));
10958
10959 gas_assert (cond != 0xF);
10960 inst.instruction |= cond << 22;
10961 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10962 }
10963 }
10964 else
10965 {
10966 inst.instruction = THUMB_OP16(opcode);
10967 if (cond == COND_ALWAYS)
10968 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10969 else
10970 {
10971 inst.instruction |= cond << 8;
10972 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10973 }
10974 /* Allow section relaxation. */
10975 if (unified_syntax && inst.size_req != 2)
10976 inst.relax = opcode;
10977 }
10978 inst.reloc.type = reloc;
10979 inst.reloc.pc_rel = 1;
10980 }
10981
10982 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10983 between the two is the maximum immediate allowed - which is passed in
10984 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These instructions execute regardless of any preceding condition,
     so reject an explicit condition suffix.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      /* The immediate operand is optional; RANGE is its maximum
	 (255 for BKPT, 63 for HLT).  */
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
10999
static void
do_t_hlt (void)
{
  /* HLT takes a 6-bit immediate, 0..0x3f.  */
  do_t_bkpt_hlt1 (0x3f);
}
11005
static void
do_t_bkpt (void)
{
  /* BKPT takes an 8-bit immediate, 0..0xff.  */
  do_t_bkpt_hlt1 (0xff);
}
11011
static void
do_t_branch23 (void)
{
  /* BL/BLX with a BRANCH23 displacement; branches must be the final
     instruction of an IT block.  */
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11039
11040 static void
11041 do_t_bx (void)
11042 {
11043 set_it_insn_type_last ();
11044 inst.instruction |= inst.operands[0].reg << 3;
11045 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11046 should cause the alignment to be checked once it is known. This is
11047 because BX PC only works if the instruction is word aligned. */
11048 }
11049
11050 static void
11051 do_t_bxj (void)
11052 {
11053 int Rm;
11054
11055 set_it_insn_type_last ();
11056 Rm = inst.operands[0].reg;
11057 reject_bad_reg (Rm);
11058 inst.instruction |= Rm << 16;
11059 }
11060
11061 static void
11062 do_t_clz (void)
11063 {
11064 unsigned Rd;
11065 unsigned Rm;
11066
11067 Rd = inst.operands[0].reg;
11068 Rm = inst.operands[1].reg;
11069
11070 reject_bad_reg (Rd);
11071 reject_bad_reg (Rm);
11072
11073 inst.instruction |= Rd << 8;
11074 inst.instruction |= Rm << 16;
11075 inst.instruction |= Rm;
11076 }
11077
static void
do_t_cps (void)
{
  /* CPS may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* The interrupt-flags operand goes straight into the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
11084
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Wide (32-bit) CPS: rebuild the opcode from the imod bits of the
	 16-bit template, the interrupt-flags operand, and the optional
	 mode operand (bit 8 set when a mode is given).  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* Narrow (16-bit) CPS: no mode operand is encodable.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11112
11113 /* THUMB CPY instruction (argument parse). */
11114
11115 static void
11116 do_t_cpy (void)
11117 {
11118 if (inst.size_req == 4)
11119 {
11120 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11121 inst.instruction |= inst.operands[0].reg << 8;
11122 inst.instruction |= inst.operands[1].reg;
11123 }
11124 else
11125 {
11126 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11127 inst.instruction |= (inst.operands[0].reg & 0x7);
11128 inst.instruction |= inst.operands[1].reg << 3;
11129 }
11130 }
11131
11132 static void
11133 do_t_cbz (void)
11134 {
11135 set_it_insn_type (OUTSIDE_IT_INSN);
11136 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11137 inst.instruction |= inst.operands[0].reg;
11138 inst.reloc.pc_rel = 1;
11139 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11140 }
11141
static void
do_t_dbg (void)
{
  /* DBG: the option immediate goes straight into the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
11147
11148 static void
11149 do_t_div (void)
11150 {
11151 unsigned Rd, Rn, Rm;
11152
11153 Rd = inst.operands[0].reg;
11154 Rn = (inst.operands[1].present
11155 ? inst.operands[1].reg : Rd);
11156 Rm = inst.operands[2].reg;
11157
11158 reject_bad_reg (Rd);
11159 reject_bad_reg (Rn);
11160 reject_bad_reg (Rm);
11161
11162 inst.instruction |= Rd << 8;
11163 inst.instruction |= Rn << 16;
11164 inst.instruction |= Rm;
11165 }
11166
11167 static void
11168 do_t_hint (void)
11169 {
11170 if (unified_syntax && inst.size_req == 4)
11171 inst.instruction = THUMB_OP32 (inst.instruction);
11172 else
11173 inst.instruction = THUMB_OP16 (inst.instruction);
11174 }
11175
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Record the IT state for validating the following instructions;
     bit 4 of the mask marks the base condition position.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The parser built the mask assuming an odd (positive) condition;
	 for an even condition each then/else bit must be flipped.  The
	 position of the lowest set bit gives the block length.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11218
11219 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the template distinguishes a load from a store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so this must be the last
	       instruction of any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      /* NOTE(review): mask == 0 would also take this path; presumably an
	 empty register list is rejected before we get here -- confirm.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Turn the mask into the lone register's number, placed in the
	 Rt field (bits 15:12) of the str/ldr encoding.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11282
/* Encode Thumb LDM/STM (and LDMDB/STMDB).  In unified syntax a 16-bit
   encoding is chosen where the operands permit, including rewriting a
   single-register list as LDR/STR and SP-based lists as PUSH/POP;
   otherwise the Thumb-2 32-bit form is emitted via encode_thumb2_ldmstm.
   Operand 0 is the base register, operand 1 carries the register list
   in .imm.  */

static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit form requires writeback for STMIA, and for
		 LDMIA writeback exactly when the base register is not
		 in the transfer list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  /* STMIA with the base in the list stores an UNKNOWN
		     value for it unless it is the lowest register.  */
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base with writeback becomes PUSH/POP; without
		 writeback, a single-register list becomes the
		 SP-relative STR/LDR form.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* No 16-bit form applied: emit the Thumb-2 encoding.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-UAL syntax: only 16-bit LDMIA/STMIA with low registers.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  /* 16-bit LDMIA always writes back unless the base is in the
	     list; warn whenever the user's writeback marker disagrees.  */
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11410
11411 static void
11412 do_t_ldrex (void)
11413 {
11414 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11415 || inst.operands[1].postind || inst.operands[1].writeback
11416 || inst.operands[1].immisreg || inst.operands[1].shifted
11417 || inst.operands[1].negative,
11418 BAD_ADDR_MODE);
11419
11420 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11421
11422 inst.instruction |= inst.operands[0].reg << 12;
11423 inst.instruction |= inst.operands[1].reg << 16;
11424 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11425 }
11426
11427 static void
11428 do_t_ldrexd (void)
11429 {
11430 if (!inst.operands[1].present)
11431 {
11432 constraint (inst.operands[0].reg == REG_LR,
11433 _("r14 not allowed as first register "
11434 "when second register is omitted"));
11435 inst.operands[1].reg = inst.operands[0].reg + 1;
11436 }
11437 constraint (inst.operands[0].reg == inst.operands[1].reg,
11438 BAD_OVERLAP);
11439
11440 inst.instruction |= inst.operands[0].reg << 12;
11441 inst.instruction |= inst.operands[1].reg << 8;
11442 inst.instruction |= inst.operands[2].reg << 16;
11443 }
11444
/* Encode Thumb single-register load/store (LDR/STR and the byte,
   halfword and signed variants).  In unified syntax a 16-bit encoding
   is preferred when the operands allow, falling back to the Thumb-2
   32-bit form; pre-UAL syntax only produces 16-bit encodings.
   Operand 0 is Rt, operand 1 the address.  */

static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load writing the PC is a branch, so must close any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* =const operand: try a literal-pool entry or a mov/mvn
	     substitution; if that succeeds we are done.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes
		     with Rt in bits 8-10.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit .n: record the opcode so relaxation can
		   widen to 32 bits if the offset does not fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-UAL syntax from here on: only 16-bit encodings exist.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: word transfers only, stores may not use PC,
	 and no register offset is available.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Swap the immediate-offset opcode for its register-offset
     counterpart; Rm then goes into bits 6-8.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11631
11632 static void
11633 do_t_ldstd (void)
11634 {
11635 if (!inst.operands[1].present)
11636 {
11637 inst.operands[1].reg = inst.operands[0].reg + 1;
11638 constraint (inst.operands[0].reg == REG_LR,
11639 _("r14 not allowed here"));
11640 constraint (inst.operands[0].reg == REG_R12,
11641 _("r12 not allowed here"));
11642 }
11643
11644 if (inst.operands[2].writeback
11645 && (inst.operands[0].reg == inst.operands[2].reg
11646 || inst.operands[1].reg == inst.operands[2].reg))
11647 as_warn (_("base register written back, and overlaps "
11648 "one of transfer registers"));
11649
11650 inst.instruction |= inst.operands[0].reg << 12;
11651 inst.instruction |= inst.operands[1].reg << 8;
11652 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11653 }
11654
11655 static void
11656 do_t_ldstt (void)
11657 {
11658 inst.instruction |= inst.operands[0].reg << 12;
11659 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11660 }
11661
11662 static void
11663 do_t_mla (void)
11664 {
11665 unsigned Rd, Rn, Rm, Ra;
11666
11667 Rd = inst.operands[0].reg;
11668 Rn = inst.operands[1].reg;
11669 Rm = inst.operands[2].reg;
11670 Ra = inst.operands[3].reg;
11671
11672 reject_bad_reg (Rd);
11673 reject_bad_reg (Rn);
11674 reject_bad_reg (Rm);
11675 reject_bad_reg (Ra);
11676
11677 inst.instruction |= Rd << 8;
11678 inst.instruction |= Rn << 16;
11679 inst.instruction |= Rm;
11680 inst.instruction |= Ra << 12;
11681 }
11682
11683 static void
11684 do_t_mlal (void)
11685 {
11686 unsigned RdLo, RdHi, Rn, Rm;
11687
11688 RdLo = inst.operands[0].reg;
11689 RdHi = inst.operands[1].reg;
11690 Rn = inst.operands[2].reg;
11691 Rm = inst.operands[3].reg;
11692
11693 reject_bad_reg (RdLo);
11694 reject_bad_reg (RdHi);
11695 reject_bad_reg (Rn);
11696 reject_bad_reg (Rm);
11697
11698 inst.instruction |= RdLo << 12;
11699 inst.instruction |= RdHi << 8;
11700 inst.instruction |= Rn << 16;
11701 inst.instruction |= Rm;
11702 }
11703
11704 static void
11705 do_t_mov_cmp (void)
11706 {
11707 unsigned Rn, Rm;
11708
11709 Rn = inst.operands[0].reg;
11710 Rm = inst.operands[1].reg;
11711
11712 if (Rn == REG_PC)
11713 set_it_insn_type_last ();
11714
11715 if (unified_syntax)
11716 {
11717 int r0off = (inst.instruction == T_MNEM_mov
11718 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11719 unsigned long opcode;
11720 bfd_boolean narrow;
11721 bfd_boolean low_regs;
11722
11723 low_regs = (Rn <= 7 && Rm <= 7);
11724 opcode = inst.instruction;
11725 if (in_it_block ())
11726 narrow = opcode != T_MNEM_movs;
11727 else
11728 narrow = opcode != T_MNEM_movs || low_regs;
11729 if (inst.size_req == 4
11730 || inst.operands[1].shifted)
11731 narrow = FALSE;
11732
11733 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11734 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11735 && !inst.operands[1].shifted
11736 && Rn == REG_PC
11737 && Rm == REG_LR)
11738 {
11739 inst.instruction = T2_SUBS_PC_LR;
11740 return;
11741 }
11742
11743 if (opcode == T_MNEM_cmp)
11744 {
11745 constraint (Rn == REG_PC, BAD_PC);
11746 if (narrow)
11747 {
11748 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11749 but valid. */
11750 warn_deprecated_sp (Rm);
11751 /* R15 was documented as a valid choice for Rm in ARMv6,
11752 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11753 tools reject R15, so we do too. */
11754 constraint (Rm == REG_PC, BAD_PC);
11755 }
11756 else
11757 reject_bad_reg (Rm);
11758 }
11759 else if (opcode == T_MNEM_mov
11760 || opcode == T_MNEM_movs)
11761 {
11762 if (inst.operands[1].isreg)
11763 {
11764 if (opcode == T_MNEM_movs)
11765 {
11766 reject_bad_reg (Rn);
11767 reject_bad_reg (Rm);
11768 }
11769 else if (narrow)
11770 {
11771 /* This is mov.n. */
11772 if ((Rn == REG_SP || Rn == REG_PC)
11773 && (Rm == REG_SP || Rm == REG_PC))
11774 {
11775 as_tsktsk (_("Use of r%u as a source register is "
11776 "deprecated when r%u is the destination "
11777 "register."), Rm, Rn);
11778 }
11779 }
11780 else
11781 {
11782 /* This is mov.w. */
11783 constraint (Rn == REG_PC, BAD_PC);
11784 constraint (Rm == REG_PC, BAD_PC);
11785 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11786 }
11787 }
11788 else
11789 reject_bad_reg (Rn);
11790 }
11791
11792 if (!inst.operands[1].isreg)
11793 {
11794 /* Immediate operand. */
11795 if (!in_it_block () && opcode == T_MNEM_mov)
11796 narrow = 0;
11797 if (low_regs && narrow)
11798 {
11799 inst.instruction = THUMB_OP16 (opcode);
11800 inst.instruction |= Rn << 8;
11801 if (inst.size_req == 2)
11802 {
11803 if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11804 || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
11805 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11806 }
11807 else
11808 inst.relax = opcode;
11809 }
11810 else
11811 {
11812 inst.instruction = THUMB_OP32 (inst.instruction);
11813 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11814 inst.instruction |= Rn << r0off;
11815 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11816 }
11817 }
11818 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11819 && (inst.instruction == T_MNEM_mov
11820 || inst.instruction == T_MNEM_movs))
11821 {
11822 /* Register shifts are encoded as separate shift instructions. */
11823 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11824
11825 if (in_it_block ())
11826 narrow = !flags;
11827 else
11828 narrow = flags;
11829
11830 if (inst.size_req == 4)
11831 narrow = FALSE;
11832
11833 if (!low_regs || inst.operands[1].imm > 7)
11834 narrow = FALSE;
11835
11836 if (Rn != Rm)
11837 narrow = FALSE;
11838
11839 switch (inst.operands[1].shift_kind)
11840 {
11841 case SHIFT_LSL:
11842 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11843 break;
11844 case SHIFT_ASR:
11845 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11846 break;
11847 case SHIFT_LSR:
11848 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11849 break;
11850 case SHIFT_ROR:
11851 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11852 break;
11853 default:
11854 abort ();
11855 }
11856
11857 inst.instruction = opcode;
11858 if (narrow)
11859 {
11860 inst.instruction |= Rn;
11861 inst.instruction |= inst.operands[1].imm << 3;
11862 }
11863 else
11864 {
11865 if (flags)
11866 inst.instruction |= CONDS_BIT;
11867
11868 inst.instruction |= Rn << 8;
11869 inst.instruction |= Rm << 16;
11870 inst.instruction |= inst.operands[1].imm;
11871 }
11872 }
11873 else if (!narrow)
11874 {
11875 /* Some mov with immediate shift have narrow variants.
11876 Register shifts are handled above. */
11877 if (low_regs && inst.operands[1].shifted
11878 && (inst.instruction == T_MNEM_mov
11879 || inst.instruction == T_MNEM_movs))
11880 {
11881 if (in_it_block ())
11882 narrow = (inst.instruction == T_MNEM_mov);
11883 else
11884 narrow = (inst.instruction == T_MNEM_movs);
11885 }
11886
11887 if (narrow)
11888 {
11889 switch (inst.operands[1].shift_kind)
11890 {
11891 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11892 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11893 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11894 default: narrow = FALSE; break;
11895 }
11896 }
11897
11898 if (narrow)
11899 {
11900 inst.instruction |= Rn;
11901 inst.instruction |= Rm << 3;
11902 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11903 }
11904 else
11905 {
11906 inst.instruction = THUMB_OP32 (inst.instruction);
11907 inst.instruction |= Rn << r0off;
11908 encode_thumb32_shifted_operand (1);
11909 }
11910 }
11911 else
11912 switch (inst.instruction)
11913 {
11914 case T_MNEM_mov:
11915 /* In v4t or v5t a move of two lowregs produces unpredictable
11916 results. Don't allow this. */
11917 if (low_regs)
11918 {
11919 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11920 "MOV Rd, Rs with two low registers is not "
11921 "permitted on this architecture");
11922 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11923 arm_ext_v6);
11924 }
11925
11926 inst.instruction = T_OPCODE_MOV_HR;
11927 inst.instruction |= (Rn & 0x8) << 4;
11928 inst.instruction |= (Rn & 0x7);
11929 inst.instruction |= Rm << 3;
11930 break;
11931
11932 case T_MNEM_movs:
11933 /* We know we have low registers at this point.
11934 Generate LSLS Rd, Rs, #0. */
11935 inst.instruction = T_OPCODE_LSL_I;
11936 inst.instruction |= Rn;
11937 inst.instruction |= Rm << 3;
11938 break;
11939
11940 case T_MNEM_cmp:
11941 if (low_regs)
11942 {
11943 inst.instruction = T_OPCODE_CMP_LR;
11944 inst.instruction |= Rn;
11945 inst.instruction |= Rm << 3;
11946 }
11947 else
11948 {
11949 inst.instruction = T_OPCODE_CMP_HR;
11950 inst.instruction |= (Rn & 0x8) << 4;
11951 inst.instruction |= (Rn & 0x7);
11952 inst.instruction |= Rm << 3;
11953 }
11954 break;
11955 }
11956 return;
11957 }
11958
11959 inst.instruction = THUMB_OP16 (inst.instruction);
11960
11961 /* PR 10443: Do not silently ignore shifted operands. */
11962 constraint (inst.operands[1].shifted,
11963 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11964
11965 if (inst.operands[1].isreg)
11966 {
11967 if (Rn < 8 && Rm < 8)
11968 {
11969 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11970 since a MOV instruction produces unpredictable results. */
11971 if (inst.instruction == T_OPCODE_MOV_I8)
11972 inst.instruction = T_OPCODE_ADD_I3;
11973 else
11974 inst.instruction = T_OPCODE_CMP_LR;
11975
11976 inst.instruction |= Rn;
11977 inst.instruction |= Rm << 3;
11978 }
11979 else
11980 {
11981 if (inst.instruction == T_OPCODE_MOV_I8)
11982 inst.instruction = T_OPCODE_MOV_HR;
11983 else
11984 inst.instruction = T_OPCODE_CMP_HR;
11985 do_t_cpy ();
11986 }
11987 }
11988 else
11989 {
11990 constraint (Rn > 7,
11991 _("only lo regs allowed with immediate"));
11992 inst.instruction |= Rn << 8;
11993 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11994 }
11995 }
11996
11997 static void
11998 do_t_mov16 (void)
11999 {
12000 unsigned Rd;
12001 bfd_vma imm;
12002 bfd_boolean top;
12003
12004 top = (inst.instruction & 0x00800000) != 0;
12005 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12006 {
12007 constraint (top, _(":lower16: not allowed this instruction"));
12008 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12009 }
12010 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12011 {
12012 constraint (!top, _(":upper16: not allowed this instruction"));
12013 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12014 }
12015
12016 Rd = inst.operands[0].reg;
12017 reject_bad_reg (Rd);
12018
12019 inst.instruction |= Rd << 8;
12020 if (inst.reloc.type == BFD_RELOC_UNUSED)
12021 {
12022 imm = inst.reloc.exp.X_add_number;
12023 inst.instruction |= (imm & 0xf000) << 4;
12024 inst.instruction |= (imm & 0x0800) << 15;
12025 inst.instruction |= (imm & 0x0700) << 4;
12026 inst.instruction |= (imm & 0x00ff);
12027 }
12028 }
12029
/* Encode MVN/MVNS/TST/CMN (the code also tolerates CMP reaching here)
   with a register, shifted register or modified-immediate operand.  */

static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* cmp/cmn only forbid PC as the first operand; the other mnemonics
     go through reject_bad_reg.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* mvn/mvns put their register in bits 8-11; the compare/test
	 instructions use bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Decide whether a 16-bit encoding may be used: requires low
	 registers, no shift and no explicit .w; the flag-setting
	 behaviour must also match the IT context.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit only, low unshifted registers.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12109
/* Encode Thumb MRS (read a status/special register into Rd).  */

static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP system registers (FPSCR etc.) take a different encoding.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* NOTE(review): 'br' looks like a packed banked/special-register
	 descriptor from the operand parser — bit 9 and the 0xf000
	 field appear to act as validity markers; confirm against the
	 register parsing code before relying on this.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12157
/* Encode Thumb MSR (write Rn to a status/special register).  */

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP system registers take a different encoding.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* NOTE(review): operand 0 is either a register-style special-register
     descriptor or a PSR-field mask immediate; both are folded into the
     same 'flags' bit layout — confirm against the operand parser.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the descriptor bits into the encoding fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12204
/* Encode Thumb MUL/MULS.  A 16-bit encoding exists when both sources
   are low registers and the destination overlaps one of them;
   otherwise the Thumb-2 three-register form is used.  */

static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* MUL Rd, Rn is shorthand for MUL Rd, Rn, Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The flag-setting MULS is narrow only outside an IT block;
	 plain (conditional) MUL is narrow only inside one.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      /* Pre-UAL syntax only has the 16-bit MUL form.  */
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12267
12268 static void
12269 do_t_mull (void)
12270 {
12271 unsigned RdLo, RdHi, Rn, Rm;
12272
12273 RdLo = inst.operands[0].reg;
12274 RdHi = inst.operands[1].reg;
12275 Rn = inst.operands[2].reg;
12276 Rm = inst.operands[3].reg;
12277
12278 reject_bad_reg (RdLo);
12279 reject_bad_reg (RdHi);
12280 reject_bad_reg (Rn);
12281 reject_bad_reg (Rm);
12282
12283 inst.instruction |= RdLo << 12;
12284 inst.instruction |= RdHi << 8;
12285 inst.instruction |= Rn << 16;
12286 inst.instruction |= Rm;
12287
12288 if (RdLo == RdHi)
12289 as_tsktsk (_("rdhi and rdlo must be different"));
12290 }
12291
12292 static void
12293 do_t_nop (void)
12294 {
12295 set_it_insn_type (NEUTRAL_IT_INSN);
12296
12297 if (unified_syntax)
12298 {
12299 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12300 {
12301 inst.instruction = THUMB_OP32 (inst.instruction);
12302 inst.instruction |= inst.operands[0].imm;
12303 }
12304 else
12305 {
12306 /* PR9722: Check for Thumb2 availability before
12307 generating a thumb2 nop instruction. */
12308 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12309 {
12310 inst.instruction = THUMB_OP16 (inst.instruction);
12311 inst.instruction |= inst.operands[0].imm << 4;
12312 }
12313 else
12314 inst.instruction = 0x46c0;
12315 }
12316 }
12317 else
12318 {
12319 constraint (inst.operands[0].present,
12320 _("Thumb does not support NOP with hints"));
12321 inst.instruction = 0x46c0;
12322 }
12323 }
12324
12325 static void
12326 do_t_neg (void)
12327 {
12328 if (unified_syntax)
12329 {
12330 bfd_boolean narrow;
12331
12332 if (THUMB_SETS_FLAGS (inst.instruction))
12333 narrow = !in_it_block ();
12334 else
12335 narrow = in_it_block ();
12336 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12337 narrow = FALSE;
12338 if (inst.size_req == 4)
12339 narrow = FALSE;
12340
12341 if (!narrow)
12342 {
12343 inst.instruction = THUMB_OP32 (inst.instruction);
12344 inst.instruction |= inst.operands[0].reg << 8;
12345 inst.instruction |= inst.operands[1].reg << 16;
12346 }
12347 else
12348 {
12349 inst.instruction = THUMB_OP16 (inst.instruction);
12350 inst.instruction |= inst.operands[0].reg;
12351 inst.instruction |= inst.operands[1].reg << 3;
12352 }
12353 }
12354 else
12355 {
12356 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12357 BAD_HIREG);
12358 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12359
12360 inst.instruction = THUMB_OP16 (inst.instruction);
12361 inst.instruction |= inst.operands[0].reg;
12362 inst.instruction |= inst.operands[1].reg << 3;
12363 }
12364 }
12365
12366 static void
12367 do_t_orn (void)
12368 {
12369 unsigned Rd, Rn;
12370
12371 Rd = inst.operands[0].reg;
12372 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12373
12374 reject_bad_reg (Rd);
12375 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12376 reject_bad_reg (Rn);
12377
12378 inst.instruction |= Rd << 8;
12379 inst.instruction |= Rn << 16;
12380
12381 if (!inst.operands[2].isreg)
12382 {
12383 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12384 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12385 }
12386 else
12387 {
12388 unsigned Rm;
12389
12390 Rm = inst.operands[2].reg;
12391 reject_bad_reg (Rm);
12392
12393 constraint (inst.operands[2].shifted
12394 && inst.operands[2].immisreg,
12395 _("shift must be constant"));
12396 encode_thumb32_shifted_operand (2);
12397 }
12398 }
12399
/* Encode a Thumb-2 PKHBT instruction: pack the bottom halfword of Rn
   with the top halfword of (optionally shifted) Rm.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Optional shift amount; must be an assembly-time constant.  */
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Split the 5-bit shift amount across the imm3 and imm2 fields.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12425
/* Encode a Thumb-2 PKHTB instruction.  With no shift specified,
   PKHTB Rd, Rn, Rm is equivalent to PKHBT Rd, Rm, Rn, so it is encoded
   as PKHBT with the source registers swapped and the tb bit cleared.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb bit to select the PKHBT form.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12442
/* Encode the operand of a Thumb-2 PLD/PLDW/PLI preload instruction.  */
static void
do_t_pld (void)
{
  /* With a register offset, the index register must not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12451
/* Encode Thumb PUSH/POP, picking the narrowest encoding that can
   represent the given register list.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (for push) or PC (for pop): still
	 16-bit, using the M/P bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM form with SP writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12484
/* Encode a Thumb-2 RBIT (reverse bits) instruction.  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The source register is deliberately encoded twice, in both the
     Rn and Rm fields of the T32 encoding.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12500
12501 static void
12502 do_t_rev (void)
12503 {
12504 unsigned Rd, Rm;
12505
12506 Rd = inst.operands[0].reg;
12507 Rm = inst.operands[1].reg;
12508
12509 reject_bad_reg (Rd);
12510 reject_bad_reg (Rm);
12511
12512 if (Rd <= 7 && Rm <= 7
12513 && inst.size_req != 4)
12514 {
12515 inst.instruction = THUMB_OP16 (inst.instruction);
12516 inst.instruction |= Rd;
12517 inst.instruction |= Rm << 3;
12518 }
12519 else if (unified_syntax)
12520 {
12521 inst.instruction = THUMB_OP32 (inst.instruction);
12522 inst.instruction |= Rd << 8;
12523 inst.instruction |= Rm << 16;
12524 inst.instruction |= Rm;
12525 }
12526 else
12527 inst.error = BAD_HIREG;
12528 }
12529
/* Encode a Thumb-2 RRX instruction (MOV with rotate-right-with-extend).  */
static void
do_t_rrx (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm;
}
12544
/* Encode Thumb RSB (reverse subtract), in register or immediate form.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; a narrow encoding is only
	 possible when it agrees with the IT state.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only rsb #0 has a 16-bit equivalent.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* T32 data-processing (modified immediate) encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12599
/* Encode Thumb SETEND; the instruction is deprecated from ARMv8 on.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* SETEND is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Bit 3 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12611
/* Encode Thumb shift instructions (ASR, LSL, LSR, ROR) in either
   immediate-shift or register-shift form, narrow or wide.  */
static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow encodings set the flags iff outside an IT block.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form is destructive (Rd == Rn) and
	 limited to low registers.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Shift-by-immediate is encoded as a 32-bit MOV with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only narrow encodings are available.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12759
12760 static void
12761 do_t_simd (void)
12762 {
12763 unsigned Rd, Rn, Rm;
12764
12765 Rd = inst.operands[0].reg;
12766 Rn = inst.operands[1].reg;
12767 Rm = inst.operands[2].reg;
12768
12769 reject_bad_reg (Rd);
12770 reject_bad_reg (Rn);
12771 reject_bad_reg (Rm);
12772
12773 inst.instruction |= Rd << 8;
12774 inst.instruction |= Rn << 16;
12775 inst.instruction |= Rm;
12776 }
12777
12778 static void
12779 do_t_simd2 (void)
12780 {
12781 unsigned Rd, Rn, Rm;
12782
12783 Rd = inst.operands[0].reg;
12784 Rm = inst.operands[1].reg;
12785 Rn = inst.operands[2].reg;
12786
12787 reject_bad_reg (Rd);
12788 reject_bad_reg (Rn);
12789 reject_bad_reg (Rm);
12790
12791 inst.instruction |= Rd << 8;
12792 inst.instruction |= Rn << 16;
12793 inst.instruction |= Rm;
12794 }
12795
/* Encode Thumb-2 SMC (secure monitor call).  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is encoded directly; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12811
/* Encode Thumb-2 HVC (hypervisor call) with a 16-bit immediate.  */
static void
do_t_hvc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;

  inst.reloc.type = BFD_RELOC_UNUSED;
  /* The 16-bit immediate is split between the two halfwords of the
     encoding.  */
  inst.instruction |= (value & 0x0fff);
  inst.instruction |= (value & 0xf000) << 4;
}
12821
/* Common encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation bound operand (1 for SSAT, 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      /* Optional shift applied to Rn before saturating.  */
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the 5-bit shift amount across the imm3 and imm2
	     fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12859
/* Encode Thumb-2 SSAT; the saturation bound is encoded biased by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12865
/* Encode Thumb-2 SSAT16; like SSAT, the bound is encoded biased by 1.  */
static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}
12881
/* Encode Thumb-2 STREX.  */
static void
do_t_strex (void)
{
  /* Only a plain [Rn, #imm] addressing mode is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The byte offset is resolved by the fixup machinery.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12898
/* Encode Thumb-2 STREXD.  */
static void
do_t_strexd (void)
{
  /* The second source register defaults to Rt + 1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap any other operand.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12915
12916 static void
12917 do_t_sxtah (void)
12918 {
12919 unsigned Rd, Rn, Rm;
12920
12921 Rd = inst.operands[0].reg;
12922 Rn = inst.operands[1].reg;
12923 Rm = inst.operands[2].reg;
12924
12925 reject_bad_reg (Rd);
12926 reject_bad_reg (Rn);
12927 reject_bad_reg (Rm);
12928
12929 inst.instruction |= Rd << 8;
12930 inst.instruction |= Rn << 16;
12931 inst.instruction |= Rm;
12932 inst.instruction |= inst.operands[3].imm << 4;
12933 }
12934
/* Encode Thumb SXTH/SXTB/UXTH/UXTB, including the Thumb-2 rotated
   forms.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* 16-bit form: low registers, no rotation, and no explicit .w.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation amount in bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12970
/* Encode Thumb SVC/SWI.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12987
/* Encode Thumb-2 TBB/TBH (table branch).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB (byte table).  */
  half = (inst.instruction & 0x10) != 0;
  /* Branch-like: must be outside, or last in, an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes a (LSL #1) shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13009
/* Encode Thumb UDF (permanently undefined).  */
static void
do_t_udf (void)
{
  /* The immediate operand is optional and defaults to zero.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* The 16-bit immediate is split imm4:imm12 across the two
	 halfwords.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13032
13033
/* Encode Thumb-2 USAT; unlike SSAT the bound is encoded unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13039
/* Encode Thumb-2 USAT16.  Unlike SSAT16, the saturation bound is
   encoded unbiased (no "- 1").  */
static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}
13055
13056 /* Neon instruction encoder helpers. */
13057
13058 /* Encodings for the different types for various Neon opcodes. */
13059
13060 /* An "invalid" code for the following tables. */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative base encodings of an
   overloaded mnemonic, selected by the NEON_ENCODE macro below.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or first) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Generate one N_MNEM_* enumerator per table row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the TYPE variant from neon_enc_tab and
   mark the instruction as Neon for check_neon_suffixes below.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a Neon type suffix (e.g. ".s32") on an instruction that never
   went through NEON_ENCODE.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13197
13198 /* Define shapes for instruction operands. The following mnemonic characters
13199 are used in this table:
13200
13201 F - VFP S<n> register
13202 D - Neon D<n> register
13203 Q - Neon Q<n> register
13204 I - Immediate
13205 S - Scalar
13206 R - ARM register
13207 L - D<n> register list
13208
13209 This table is used to generate various data:
13210 - enumerations of the form NS_DDR to be used as arguments to
13211 neon_select_shape.
13212 - a table classifying shapes into single, double, quad, mixed.
13213 - a table used to drive neon_select_shape. */
13214
/* Each row: operand count, operand kinds, and overall classification.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Token-pasting helpers used to build NS_* enumerator names.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape, e.g. NS_DDD, NS_QQI, plus NS_NULL.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};

/* Expanded form of one shape row: operand count and per-operand kinds.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Drives neon_select_shape; indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13335
13336 /* Bit masks used in type checking given instructions.
13337 'N_EQK' means the type must be the same as (or based on in some way) the key
13338 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13339 set, various other bits can be set as well in order to modify the meaning of
13340 the type constraint. */
13341
enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following bits reuse the low type-bit values; they are only
     meaningful when N_EQK is also set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All the N_EQK modifier bits together.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of the element-type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13393
13394 /* Select a "shape" for the current instruction (describing register types or
13395 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13396 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13397 function of operand parsing, so this function doesn't need to be called.
13398 Shapes should be listed in order of decreasing length. */
13399
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches the parsed
     operands.  The argument list must be terminated with NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check that operand J has the register class this shape
	     element requires.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13491
13492 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13493 means the Q bit should be set). */
13494
13495 static int
13496 neon_quad (enum neon_shape shape)
13497 {
13498 return neon_shape_class[shape] == SC_QUAD;
13499 }
13500
13501 static void
13502 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13503 unsigned *g_size)
13504 {
13505 /* Allow modification to be made to types which are constrained to be
13506 based on the key element, based on bits set alongside N_EQK. */
13507 if ((typebits & N_EQK) != 0)
13508 {
13509 if ((typebits & N_HLF) != 0)
13510 *g_size /= 2;
13511 else if ((typebits & N_DBL) != 0)
13512 *g_size *= 2;
13513 if ((typebits & N_SGN) != 0)
13514 *g_type = NT_signed;
13515 else if ((typebits & N_UNS) != 0)
13516 *g_type = NT_unsigned;
13517 else if ((typebits & N_INT) != 0)
13518 *g_type = NT_integer;
13519 else if ((typebits & N_FLT) != 0)
13520 *g_type = NT_float;
13521 else if ((typebits & N_SIZ) != 0)
13522 *g_type = NT_untyped;
13523 }
13524 }
13525
13526 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13527 operand type, i.e. the single type specified in a Neon instruction when it
13528 is the only one given. */
13529
13530 static struct neon_type_el
13531 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13532 {
13533 struct neon_type_el dest = *key;
13534
13535 gas_assert ((thisarg & N_EQK) != 0);
13536
13537 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13538
13539 return dest;
13540 }
13541
/* Convert Neon type and size into compact bitmask representation.
   Returns the single N_* mask bit for (TYPE, SIZE), or N_UTYP when the
   combination has no encoding (e.g. 8-bit float).  Inverse of
   el_type_of_type_chk for single-bit masks.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      /* Note: no 8-bit float type exists.  */
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      /* Polynomial types: 32-bit has no encoding; 64-bit is ARMv8 crypto.  */
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  return N_UTYP;
}
13618
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.
   On SUCCESS, *TYPE and *SIZE describe the element; returns FAIL for N_EQK
   masks, empty masks, or special bits with no size/type mapping.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK is a constraint marker, not a concrete type.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element size from which size-group the bit falls in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Derive the element type from which type-group the bit falls in.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13657
13658 /* Modify a bitmask of allowed types. This is only needed for type
13659 relaxation. */
13660
13661 static unsigned
13662 modify_types_allowed (unsigned allowed, unsigned mods)
13663 {
13664 unsigned size;
13665 enum neon_el_type type;
13666 unsigned destmask;
13667 int i;
13668
13669 destmask = 0;
13670
13671 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13672 {
13673 if (el_type_of_type_chk (&type, &size,
13674 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13675 {
13676 neon_modify_type_size (mods, &type, &size);
13677 destmask |= type_chk_of_el_type (type, size);
13678 }
13679 }
13680
13681 return destmask;
13682 }
13683
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.
   ELS is the number of type arguments that follow NS; each is an N_* mask
   describing what that operand accepts.  Returns the key element's resolved
   type, or {NT_invtype, -1} on error.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the instruction accepts anything: bail out
	 early with badtype, which callers treat as "no type info".  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may be given on the mnemonic or on individual operands, but
     never both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      /* One type given for several operands: treat it as the key operand's
	 type and derive the others from it.  */
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's type/size and allowed
     mask; pass 1 validates every operand against its mask (which for
     N_EQK operands is derived from the key recorded in pass 0).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: its type must equal the key type after
		     applying the modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13872
13873 /* Neon-style VFP instruction forwarding. */
13874
13875 /* Thumb VFP instructions have 0xE in the condition field. */
13876
13877 static void
13878 do_vfp_cond_or_thumb (void)
13879 {
13880 inst.is_neon = 1;
13881
13882 if (thumb_mode)
13883 inst.instruction |= 0xe0000000;
13884 else
13885 inst.instruction |= inst.cond << 28;
13886 }
13887
13888 /* Look up and encode a simple mnemonic, for use as a helper function for the
13889 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13890 etc. It is assumed that operand parsing has already been done, and that the
13891 operands are in the form expected by the given opcode (this isn't necessarily
13892 the same as the form in which they were parsed, hence some massaging must
13893 take place before this function is called).
13894 Checks current arch version against that in the looked-up opcode. */
13895
13896 static void
13897 do_vfp_nsyn_opcode (const char *opname)
13898 {
13899 const struct asm_opcode *opcode;
13900
13901 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13902
13903 if (!opcode)
13904 abort ();
13905
13906 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13907 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13908 _(BAD_FPU));
13909
13910 inst.is_neon = 1;
13911
13912 if (thumb_mode)
13913 {
13914 inst.instruction = opcode->tvalue;
13915 opcode->tencode ();
13916 }
13917 else
13918 {
13919 inst.instruction = (inst.cond << 28) | opcode->avalue;
13920 opcode->aencode ();
13921 }
13922 }
13923
13924 static void
13925 do_vfp_nsyn_add_sub (enum neon_shape rs)
13926 {
13927 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13928
13929 if (rs == NS_FFF)
13930 {
13931 if (is_add)
13932 do_vfp_nsyn_opcode ("fadds");
13933 else
13934 do_vfp_nsyn_opcode ("fsubs");
13935 }
13936 else
13937 {
13938 if (is_add)
13939 do_vfp_nsyn_opcode ("faddd");
13940 else
13941 do_vfp_nsyn_opcode ("fsubd");
13942 }
13943 }
13944
13945 /* Check operand types to see if this is a VFP instruction, and if so call
13946 PFN (). */
13947
13948 static int
13949 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13950 {
13951 enum neon_shape rs;
13952 struct neon_type_el et;
13953
13954 switch (args)
13955 {
13956 case 2:
13957 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13958 et = neon_check_type (2, rs,
13959 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13960 break;
13961
13962 case 3:
13963 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13964 et = neon_check_type (3, rs,
13965 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13966 break;
13967
13968 default:
13969 abort ();
13970 }
13971
13972 if (et.type != NT_invtype)
13973 {
13974 pfn (rs);
13975 return SUCCESS;
13976 }
13977
13978 inst.error = NULL;
13979 return FAIL;
13980 }
13981
13982 static void
13983 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13984 {
13985 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13986
13987 if (rs == NS_FFF)
13988 {
13989 if (is_mla)
13990 do_vfp_nsyn_opcode ("fmacs");
13991 else
13992 do_vfp_nsyn_opcode ("fnmacs");
13993 }
13994 else
13995 {
13996 if (is_mla)
13997 do_vfp_nsyn_opcode ("fmacd");
13998 else
13999 do_vfp_nsyn_opcode ("fnmacd");
14000 }
14001 }
14002
14003 static void
14004 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14005 {
14006 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14007
14008 if (rs == NS_FFF)
14009 {
14010 if (is_fma)
14011 do_vfp_nsyn_opcode ("ffmas");
14012 else
14013 do_vfp_nsyn_opcode ("ffnmas");
14014 }
14015 else
14016 {
14017 if (is_fma)
14018 do_vfp_nsyn_opcode ("ffmad");
14019 else
14020 do_vfp_nsyn_opcode ("ffnmad");
14021 }
14022 }
14023
14024 static void
14025 do_vfp_nsyn_mul (enum neon_shape rs)
14026 {
14027 if (rs == NS_FFF)
14028 do_vfp_nsyn_opcode ("fmuls");
14029 else
14030 do_vfp_nsyn_opcode ("fmuld");
14031 }
14032
14033 static void
14034 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14035 {
14036 int is_neg = (inst.instruction & 0x80) != 0;
14037 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
14038
14039 if (rs == NS_FF)
14040 {
14041 if (is_neg)
14042 do_vfp_nsyn_opcode ("fnegs");
14043 else
14044 do_vfp_nsyn_opcode ("fabss");
14045 }
14046 else
14047 {
14048 if (is_neg)
14049 do_vfp_nsyn_opcode ("fnegd");
14050 else
14051 do_vfp_nsyn_opcode ("fabsd");
14052 }
14053 }
14054
14055 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14056 insns belong to Neon, and are handled elsewhere. */
14057
14058 static void
14059 do_vfp_nsyn_ldm_stm (int is_dbmode)
14060 {
14061 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14062 if (is_ldm)
14063 {
14064 if (is_dbmode)
14065 do_vfp_nsyn_opcode ("fldmdbs");
14066 else
14067 do_vfp_nsyn_opcode ("fldmias");
14068 }
14069 else
14070 {
14071 if (is_dbmode)
14072 do_vfp_nsyn_opcode ("fstmdbs");
14073 else
14074 do_vfp_nsyn_opcode ("fstmias");
14075 }
14076 }
14077
14078 static void
14079 do_vfp_nsyn_sqrt (void)
14080 {
14081 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14082 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14083
14084 if (rs == NS_FF)
14085 do_vfp_nsyn_opcode ("fsqrts");
14086 else
14087 do_vfp_nsyn_opcode ("fsqrtd");
14088 }
14089
14090 static void
14091 do_vfp_nsyn_div (void)
14092 {
14093 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14094 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14095 N_F32 | N_F64 | N_KEY | N_VFP);
14096
14097 if (rs == NS_FFF)
14098 do_vfp_nsyn_opcode ("fdivs");
14099 else
14100 do_vfp_nsyn_opcode ("fdivd");
14101 }
14102
14103 static void
14104 do_vfp_nsyn_nmul (void)
14105 {
14106 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14107 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14108 N_F32 | N_F64 | N_KEY | N_VFP);
14109
14110 if (rs == NS_FFF)
14111 {
14112 NEON_ENCODE (SINGLE, inst);
14113 do_vfp_sp_dyadic ();
14114 }
14115 else
14116 {
14117 NEON_ENCODE (DOUBLE, inst);
14118 do_vfp_dp_rd_rn_rm ();
14119 }
14120 do_vfp_cond_or_thumb ();
14121 }
14122
/* Encode VCMP/VCMPE.  Two forms: register-register compare, and
   compare-against-zero (second operand is the immediate #0), which is
   rewritten to the internal vcmpz/vcmpez pseudo-mnemonics.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register form: VCMP{E} Sd/Dd, Sm/Dm.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Zero form: VCMP{E} Sd/Dd, #0.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Retarget the pseudo-opcode onto the corresponding with-zero
	 variant before encoding.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
14172
14173 static void
14174 nsyn_insert_sp (void)
14175 {
14176 inst.operands[1] = inst.operands[0];
14177 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14178 inst.operands[0].reg = REG_SP;
14179 inst.operands[0].isreg = 1;
14180 inst.operands[0].writeback = 1;
14181 inst.operands[0].present = 1;
14182 }
14183
14184 static void
14185 do_vfp_nsyn_push (void)
14186 {
14187 nsyn_insert_sp ();
14188 if (inst.operands[1].issingle)
14189 do_vfp_nsyn_opcode ("fstmdbs");
14190 else
14191 do_vfp_nsyn_opcode ("fstmdbd");
14192 }
14193
14194 static void
14195 do_vfp_nsyn_pop (void)
14196 {
14197 nsyn_insert_sp ();
14198 if (inst.operands[1].issingle)
14199 do_vfp_nsyn_opcode ("fldmias");
14200 else
14201 do_vfp_nsyn_opcode ("fldmiad");
14202 }
14203
14204 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14205 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14206
14207 static void
14208 neon_dp_fixup (struct arm_it* insn)
14209 {
14210 unsigned int i = insn->instruction;
14211 insn->is_neon = 1;
14212
14213 if (thumb_mode)
14214 {
14215 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14216 if (i & (1 << 24))
14217 i |= 1 << 28;
14218
14219 i &= ~(1 << 24);
14220
14221 i |= 0xef000000;
14222 }
14223 else
14224 i |= 0xf2000000;
14225
14226 insn->instruction = i;
14227 }
14228
/* Turn an element size in bits (8, 16, 32, 64) into the corresponding
   two-bit size-field value (0, 1, 2, 3), i.e. log2(size) - 3.  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned tz = 0;

  /* Count trailing zero bits; X is a power of two here.  */
  while ((x & 1) == 0)
    {
      x >>= 1;
      tz++;
    }
  return tz - 3;
}
14237
/* Split a Neon/VFP register number R into the low four bits and the high
   (fifth) bit, matching the split register fields in the encodings.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14240
14241 /* Encode insns with bit pattern:
14242
14243 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14244 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14245
14246 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14247 different meaning for some instruction. */
14248
14249 static void
14250 neon_three_same (int isquad, int ubit, int size)
14251 {
14252 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14253 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14254 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14255 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14256 inst.instruction |= LOW4 (inst.operands[2].reg);
14257 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14258 inst.instruction |= (isquad != 0) << 6;
14259 inst.instruction |= (ubit != 0) << 24;
14260 if (size != -1)
14261 inst.instruction |= neon_logbits (size) << 20;
14262
14263 neon_dp_fixup (&inst);
14264 }
14265
14266 /* Encode instructions of the form:
14267
14268 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14269 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14270
14271 Don't write size if SIZE == -1. */
14272
14273 static void
14274 neon_two_same (int qbit, int ubit, int size)
14275 {
14276 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14277 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14278 inst.instruction |= LOW4 (inst.operands[1].reg);
14279 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14280 inst.instruction |= (qbit != 0) << 6;
14281 inst.instruction |= (ubit != 0) << 24;
14282
14283 if (size != -1)
14284 inst.instruction |= neon_logbits (size) << 18;
14285
14286 neon_dp_fixup (&inst);
14287 }
14288
14289 /* Neon instruction encoders, in approximate order of appearance. */
14290
14291 static void
14292 do_neon_dyadic_i_su (void)
14293 {
14294 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14295 struct neon_type_el et = neon_check_type (3, rs,
14296 N_EQK, N_EQK, N_SU_32 | N_KEY);
14297 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14298 }
14299
14300 static void
14301 do_neon_dyadic_i64_su (void)
14302 {
14303 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14304 struct neon_type_el et = neon_check_type (3, rs,
14305 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14306 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14307 }
14308
/* Encode a Neon immediate-shift instruction.  ET is the (already checked)
   element type; IMMBITS the encoded shift amount.  The element size is
   expressed via the immediate field: with size = et.size / 8 (1, 2, 4 or
   8 bytes), bit 7 gets the "L" bit (set only for 64-bit elements) and
   bits 19-21 the remaining size bits.  Write the U bit from UVAL only if
   WRITE_UBIT is set.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;  /* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;    /* L bit: 1 only when size == 8.  */
  inst.instruction |= (size & 0x7) << 19;  /* Low three size bits.  */
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14327
14328 static void
14329 do_neon_shl_imm (void)
14330 {
14331 if (!inst.operands[2].isreg)
14332 {
14333 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14334 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14335 int imm = inst.operands[2].imm;
14336
14337 constraint (imm < 0 || (unsigned)imm >= et.size,
14338 _("immediate out of range for shift"));
14339 NEON_ENCODE (IMMED, inst);
14340 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14341 }
14342 else
14343 {
14344 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14345 struct neon_type_el et = neon_check_type (3, rs,
14346 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14347 unsigned int tmp;
14348
14349 /* VSHL/VQSHL 3-register variants have syntax such as:
14350 vshl.xx Dd, Dm, Dn
14351 whereas other 3-register operations encoded by neon_three_same have
14352 syntax like:
14353 vadd.xx Dd, Dn, Dm
14354 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14355 here. */
14356 tmp = inst.operands[2].reg;
14357 inst.operands[2].reg = inst.operands[1].reg;
14358 inst.operands[1].reg = tmp;
14359 NEON_ENCODE (INTEGER, inst);
14360 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14361 }
14362 }
14363
14364 static void
14365 do_neon_qshl_imm (void)
14366 {
14367 if (!inst.operands[2].isreg)
14368 {
14369 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14370 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14371 int imm = inst.operands[2].imm;
14372
14373 constraint (imm < 0 || (unsigned)imm >= et.size,
14374 _("immediate out of range for shift"));
14375 NEON_ENCODE (IMMED, inst);
14376 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14377 }
14378 else
14379 {
14380 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14381 struct neon_type_el et = neon_check_type (3, rs,
14382 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14383 unsigned int tmp;
14384
14385 /* See note in do_neon_shl_imm. */
14386 tmp = inst.operands[2].reg;
14387 inst.operands[2].reg = inst.operands[1].reg;
14388 inst.operands[1].reg = tmp;
14389 NEON_ENCODE (INTEGER, inst);
14390 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14391 }
14392 }
14393
14394 static void
14395 do_neon_rshl (void)
14396 {
14397 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14398 struct neon_type_el et = neon_check_type (3, rs,
14399 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14400 unsigned int tmp;
14401
14402 tmp = inst.operands[2].reg;
14403 inst.operands[2].reg = inst.operands[1].reg;
14404 inst.operands[1].reg = tmp;
14405 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14406 }
14407
/* Find a "cmode" encoding for IMMEDIATE suitable for a VBIC/VORR-style
   logic-immediate instruction with element size SIZE.  On success, store
   the 8-bit payload in *IMMBITS and return the cmode value; on failure
   report an error and return FAIL.  The cmode selects which byte of the
   (16- or 32-bit) element the payload occupies.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit elements: the payload may sit in any one of the four bytes.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if the value is a repeating
	 16-bit pattern.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: the payload may sit in either byte.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

 bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14463
/* Encode the Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN/VEOR...).
   Two forms: three-register (type-ignored, U bit and size baked into the
   opcode mask), and register + immediate, where VAND and VORN are
   pseudo-instructions implemented by inverting the immediate and emitting
   VBIC/VORR respectively.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; two-operand syntax (Vd, #imm) is accepted as well
	 as three-operand (Vd, Vd, #imm).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      /* In the three-operand form the destination must repeat the first
	 source, since the encoding only has room for one register.  */
      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14549
14550 static void
14551 do_neon_bitfield (void)
14552 {
14553 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14554 neon_check_type (3, rs, N_IGNORE_TYPE);
14555 neon_three_same (neon_quad (rs), 0, -1);
14556 }
14557
14558 static void
14559 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14560 unsigned destbits)
14561 {
14562 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14563 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14564 types | N_KEY);
14565 if (et.type == NT_float)
14566 {
14567 NEON_ENCODE (FLOAT, inst);
14568 neon_three_same (neon_quad (rs), 0, -1);
14569 }
14570 else
14571 {
14572 NEON_ENCODE (INTEGER, inst);
14573 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14574 }
14575 }
14576
/* Dyadic operation on signed/unsigned/float 8-32 bit elements; the U bit
   marks the unsigned variants.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14582
/* As do_neon_dyadic_if_su, for instructions restricted to D registers.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14590
/* Dyadic operation on integer/float 8-32 bit elements, D registers only.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14598
/* Bitmask flags for vfp_or_neon_is_neon, selecting which checks/fix-ups
   to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject a condition code / substitute 0xF.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14605
14606 /* Call this function if an instruction which may have belonged to the VFP or
14607 Neon instruction sets, but turned out to be a Neon instruction (due to the
14608 operand types involved, etc.). We have to check and/or fix-up a couple of
14609 things:
14610
14611 - Make sure the user hasn't attempted to make a Neon instruction
14612 conditional.
14613 - Alter the value in the condition code field if necessary.
14614 - Make sure that the arch supports Neon instructions.
14615
14616 Which of these operations take place depends on bits from enum
14617 vfp_or_neon_is_neon_bits.
14618
14619 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14620 current instruction's condition is COND_ALWAYS, the condition field is
14621 changed to inst.uncond_value. This is necessary because instructions shared
14622 between VFP and Neon may be conditional for the VFP variants only, and the
14623 unconditional Neon version must have, e.g., 0xF in the condition field. */
14624
14625 static int
14626 vfp_or_neon_is_neon (unsigned check)
14627 {
14628 /* Conditions are always legal in Thumb mode (IT blocks). */
14629 if (!thumb_mode && (check & NEON_CHECK_CC))
14630 {
14631 if (inst.cond != COND_ALWAYS)
14632 {
14633 first_error (_(BAD_COND));
14634 return FAIL;
14635 }
14636 if (inst.uncond_value != -1)
14637 inst.instruction |= inst.uncond_value << 28;
14638 }
14639
14640 if ((check & NEON_CHECK_ARCH)
14641 && !mark_feature_used (&fpu_neon_ext_v1))
14642 {
14643 first_error (_(BAD_FPU));
14644 return FAIL;
14645 }
14646
14647 if ((check & NEON_CHECK_ARCH8)
14648 && !mark_feature_used (&fpu_neon_ext_armv8))
14649 {
14650 first_error (_(BAD_FPU));
14651 return FAIL;
14652 }
14653
14654 return SUCCESS;
14655 }
14656
static void
do_neon_addsub_if_i (void)
{
  /* Add/subtract: prefer the VFP form when the operands select it;
     otherwise fall through to the integer Neon encoding.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14670
14671 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14672 result to be:
14673 V<op> A,B (A is operand 0, B is operand 2)
14674 to mean:
14675 V<op> A,B,A
14676 not:
14677 V<op> A,B,B
14678 so handle that case specially. */
14679
14680 static void
14681 neon_exchange_operands (void)
14682 {
14683 void *scratch = alloca (sizeof (inst.operands[0]));
14684 if (inst.operands[1].present)
14685 {
14686 /* Swap operands[1] and operands[2]. */
14687 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14688 inst.operands[1] = inst.operands[2];
14689 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14690 }
14691 else
14692 {
14693 inst.operands[1] = inst.operands[2];
14694 inst.operands[2] = inst.operands[0];
14695 }
14696 }
14697
/* Encode a Neon compare.  REGTYPES are the element types allowed for the
   three-register form, IMMTYPES those for the compare-against-#0 form.
   If INVERT, swap the two source operands first (so e.g. a "less than"
   mnemonic can reuse a "greater than" encoding).  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against immediate zero.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14725
static void
do_neon_cmp (void)
{
  /* Ordinary compare; operand order as written.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

static void
do_neon_cmp_inv (void)
{
  /* Inverted compare: sources are swapped before encoding.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

static void
do_neon_ceq (void)
{
  /* Equality compare; sign is irrelevant, so only I/F types.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14743
14744 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14745 scalars, which are encoded in 5 bits, M : Rm.
14746 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14747 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14748 index in M. */
14749
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalar: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalar: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14775
14776 /* Encode multiply / multiply-accumulate scalar instructions. */
14777
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Pack the scalar operand as M:Rm per its element size (see
     neon_scalar_for_mul above), then fill in the register fields.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
14800
static void
do_neon_mac_maybe_scalar (void)
{
  /* Multiply-accumulate, with either a scalar or a vector as the second
     source.  Try the VFP form first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
14825
static void
do_neon_fmac (void)
{
  /* Fused multiply-accumulate; VFP form first, Neon otherwise.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14837
static void
do_neon_tst (void)
{
  /* Bit-test; signedness is irrelevant, only element size is encoded.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
14846
14847 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14848 same types as the MAC equivalents. The polynomial type for this instruction
14849 is encoded the same as the integer type. */
14850
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* Scalar form shares the MAC encoder; the vector form additionally
     accepts P8 (encoded like the integer types).  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
14865
static void
do_neon_qdmulh (void)
{
  /* Saturating doubling multiply (high half), scalar or vector form.
     Only signed 16/32-bit element types exist.  */
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14887
static void
do_neon_fcmp_absolute (void)
{
  /* Absolute compare; F32 only.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted absolute compare: swap sources, then encode as above.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

static void
do_neon_step (void)
{
  /* Reciprocal(-sqrt) step; F32 only, no size field.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
14911
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Two-register form; VFP variant handled first.  */
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14937
static void
do_neon_sli (void)
{
  /* Shift left and insert.  Valid immediates are 0 .. size-1.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

static void
do_neon_sri (void)
{
  /* Shift right and insert.  Valid immediates are 1 .. size, encoded
     as size - imm.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
14961
static void
do_neon_qshlu_imm (void)
{
  /* Saturating shift left (to unsigned) by immediate; signed source
     types only.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14978
14979 static void
14980 do_neon_qmovn (void)
14981 {
14982 struct neon_type_el et = neon_check_type (2, NS_DQ,
14983 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14984 /* Saturating move where operands can be signed or unsigned, and the
14985 destination has the same signedness. */
14986 NEON_ENCODE (INTEGER, inst);
14987 if (et.type == NT_unsigned)
14988 inst.instruction |= 0xc0;
14989 else
14990 inst.instruction |= 0x80;
14991 neon_two_same (0, 1, et.size / 2);
14992 }
14993
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15003
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  /* Shift amounts are 1 .. size, encoded as size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15030
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15060
static void
do_neon_movn (void)
{
  /* Narrowing move: Q source, D destination half the element width.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  /* Shift amounts are 1 .. size, encoded as size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15094
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant: a distinct encoding that only carries
	 the element size, not the shift amount.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15124
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of all conversion flavours.  Each CVT_VAR entry gives:
   a flavour-name suffix, the destination type mask, the source type
   mask, a register-width term (the local "whole_reg"/"key" variables in
   get_neon_cvt_flavour), and the VFP opcode names for the bitshift,
   plain, and round-to-zero forms (NULL where no such VFP form exists).
   The table is expanded several times below with different CVT_VAR
   definitions to build the flavour enum and the opcode-name arrays.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand the table into enumerator names.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours at or after this one have a VFP (not Neon) encoding.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15165
/* Determine which conversion flavour the current instruction's operand
   types select, by trying neon_check_type against every table entry in
   turn.  Returns neon_cvt_flavour_invalid if none matches.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Expand each table row into a type check; the first row whose types
     match wins.  inst.error is cleared because earlier failed checks may
     have set it.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15191
/* Rounding/variant selector for the VCVT family.  Modes a/n/p/m select
   the FPv8 rounding-mode encodings (rm field 0-3 in
   do_vfp_nsyn_cvt_fpv8); z and x select the legacy VCVT and VCVTR
   encodings respectively (see do_neon_cvt / do_neon_cvtr).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15202
15203 /* Neon-syntax VFP conversions. */
15204
15205 static void
15206 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15207 {
15208 const char *opname = 0;
15209
15210 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
15211 {
15212 /* Conversions with immediate bitshift. */
15213 const char *enc[] =
15214 {
15215 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15216 CVT_FLAVOUR_VAR
15217 NULL
15218 #undef CVT_VAR
15219 };
15220
15221 if (flavour < (int) ARRAY_SIZE (enc))
15222 {
15223 opname = enc[flavour];
15224 constraint (inst.operands[0].reg != inst.operands[1].reg,
15225 _("operands 0 and 1 must be the same register"));
15226 inst.operands[1] = inst.operands[2];
15227 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15228 }
15229 }
15230 else
15231 {
15232 /* Conversions without bitshift. */
15233 const char *enc[] =
15234 {
15235 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15236 CVT_FLAVOUR_VAR
15237 NULL
15238 #undef CVT_VAR
15239 };
15240
15241 if (flavour < (int) ARRAY_SIZE (enc))
15242 opname = enc[flavour];
15243 }
15244
15245 if (opname)
15246 do_vfp_nsyn_opcode (opname);
15247 }
15248
/* Encode the round-to-zero VFP conversion variant, if one exists for the
   detected flavour (NULL table entries mean no such variant).  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15265
/* Encode an FP v8 (ARMv8) VCVT{A,N,P,M} conversion.  FLAVOUR selects
   signedness (op bit) and source width (sz bit); MODE selects the
   rounding-mode field rm.  Only the four integer-result flavours are
   valid, and only modes a/n/p/m.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* FP v8 conversions are unconditional.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15323
/* Common worker for all VCVT variants.  Selects between the VFP and Neon
   encodings from the operand shape and type flavour, then encodes the
   fixed-point (immediate), rounding-mode/integer, or half-precision
   form as appropriate.  MODE carries the rounding variant requested by
   the mnemonic (see enum neon_cvt_mode).  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion (with immediate fraction bits).  */
      {
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The immediate field holds 64 - fracbits, i.e. 32 + (32 - imm)
	   for these 32-bit element types.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M}: rounding mode in bits 9:8,
	     unconditional encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Plain integer <-> float Neon conversion.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15472
/* VCVTR: legacy conversion using the "x" mode.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* VCVT: legacy conversion using the "z" mode.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA/VCVTN/VCVTP/VCVTM: ARMv8 rounding-mode conversions (rm field
   0-3 respectively; see do_vfp_nsyn_cvt_fpv8).  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15508
/* Encode VCVTB/VCVTT.  T selects the T (top-half) variant (bit 7); TO is
   set when converting to half precision (bit 16); IS_DOUBLE selects the
   double-precision form (bit 8) and determines which operand is the D
   register.  */
static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15524
/* Shared VCVTB/VCVTT worker: determine the conversion direction and
   precision from the operand types, then encode via do_neon_cvttb_2.
   Failed type checks set inst.error, which is cleared once a matching
   combination is found; if nothing matches, the error stands.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* Single precision -> half precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Half precision -> single precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No matching type combination: leave inst.error set.  */
    return;
}
15565
/* VCVTB: encode with t = FALSE.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: encode with t = TRUE.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15578
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/immediate-bits
   encoding for the requested value, trying the inverted value under the
   opposite mnemonic if the direct encoding is impossible.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate is split across imm (low 32 bits) and reg
     (high 32 bits) when regisimm is set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with whichever of MOV/MVN we settled on.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15630
15631 static void
15632 do_neon_mvn (void)
15633 {
15634 if (inst.operands[1].isreg)
15635 {
15636 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15637
15638 NEON_ENCODE (INTEGER, inst);
15639 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15640 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15641 inst.instruction |= LOW4 (inst.operands[1].reg);
15642 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15643 inst.instruction |= neon_quad (rs) << 6;
15644 }
15645 else
15646 {
15647 NEON_ENCODE (IMMED, inst);
15648 neon_move_immediate ();
15649 }
15650
15651 neon_dp_fixup (&inst);
15652 }
15653
15654 /* Encode instructions of form:
15655
15656 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15657 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15658
15659 static void
15660 neon_mixed_length (struct neon_type_el et, unsigned size)
15661 {
15662 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15663 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15664 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15665 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15666 inst.instruction |= LOW4 (inst.operands[2].reg);
15667 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15668 inst.instruction |= (et.type == NT_unsigned) << 24;
15669 inst.instruction |= neon_logbits (size) << 20;
15670
15671 neon_dp_fixup (&inst);
15672 }
15673
static void
do_neon_dyadic_long (void)
{
  /* Long dyadic ops (shape Q = D op D): destination elements are double
     the source width (N_DBL on the destination).  */
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15682
static void
do_neon_abal (void)
{
  /* VABAL: absolute difference and accumulate, long form (Q = D op D).  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15690
15691 static void
15692 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15693 {
15694 if (inst.operands[2].isscalar)
15695 {
15696 struct neon_type_el et = neon_check_type (3, NS_QDS,
15697 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15698 NEON_ENCODE (SCALAR, inst);
15699 neon_mul_mac (et, et.type == NT_unsigned);
15700 }
15701 else
15702 {
15703 struct neon_type_el et = neon_check_type (3, NS_QDD,
15704 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15705 NEON_ENCODE (INTEGER, inst);
15706 neon_mixed_length (et, et.size);
15707 }
15708 }
15709
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* VMLAL/VMLSL-style ops: 16/32-bit signed or unsigned sources, in either
     register or by-scalar form.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15715
static void
do_neon_dyadic_wide (void)
{
  /* Wide dyadic ops (shape Q = Q op D): the D operand is widened to match
     the Q operands.  */
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15723
static void
do_neon_dyadic_narrow (void)
{
  /* Narrowing dyadic ops (e.g. VADDHN): result elements are half the
     operand element width, hence the size / 2 below.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
15734
static void
do_neon_mul_sat_scalar_long (void)
{
  /* VQDMULL-style saturating long multiply: signed 16/32-bit only, register
     or by-scalar form.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15740
static void
do_neon_vmull (void)
{
  /* VMULL: long multiply.  The by-scalar form shares its encoder with the
     long multiply-accumulate instructions.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture: VMULL.P64 needs the
	     ARMv8 Crypto extension.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Make neon_mixed_length emit size field 0b10 (= log2(32)-3).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15772
static void
do_neon_ext (void)
{
  /* VEXT: extract a vector from a pair of adjacent vectors at a given
     element offset.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* The immediate is given in elements; the encoding wants a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* A D-register holds 8 bytes, a Q-register 16.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15794
static void
do_neon_rev (void)
{
  /* VREV16/VREV32/VREV64: reverse elements within fixed-size regions of a
     vector.  Which variant we have comes from bits 8-7 of the opcode.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15811
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP from a scalar: replicate Dm[x] into every lane of Dd/Qd.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      /* One-hot size marker (1, 2 or 4), placed below the scalar index.  */
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted so it lands above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size picks the size-encoding bits of the transfer opcode.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15862
15863 /* VMOV has particularly many variations. It can be one of:
15864 0. VMOV<c><q> <Qd>, <Qm>
15865 1. VMOV<c><q> <Dd>, <Dm>
15866 (Register operations, which are VORR with Rm = Rn.)
15867 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15868 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15869 (Immediate loads.)
15870 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15871 (ARM register to scalar.)
15872 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15873 (Two ARM registers to vector.)
15874 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15875 (Scalar to ARM register.)
15876 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15877 (Vector to two ARM registers.)
15878 8. VMOV.F32 <Sd>, <Sm>
15879 9. VMOV.F64 <Dd>, <Dm>
15880 (VFP register moves.)
15881 10. VMOV.F32 <Sd>, #imm
15882 11. VMOV.F64 <Dd>, #imm
15883 (VFP float immediate load.)
15884 12. VMOV <Rd>, <Sm>
15885 (VFP single to ARM reg.)
15886 13. VMOV <Sd>, <Rm>
15887 (ARM reg to VFP single.)
15888 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15889 (Two ARM regs to two VFP singles.)
15890 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15891 (Two VFP singles to two ARM regs.)
15892
15893 These cases can be disambiguated using neon_select_shape, except cases 1/9
15894 and 3/11 which depend on the operand type too.
15895
15896 All the encoded bits are hardcoded by this function.
15897
15898 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15899 Cases 5, 7 may be used with VFPv2 and above.
15900
15901 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15902 can specify a type where it doesn't make sense to, and is ignored). */
15903
static void
do_neon_mov (void)
{
  /* Dispatch the many VMOV variants (see the big comment above) on the
     operand shape; cases 1/9 and 3/11 additionally need the element type.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 encodes as the VFP fcpyd alias.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR wants the source in both Rn and Rm.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Without Neon only 32-bit transfers are available.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base size-selector bits; the index bits are ORed in below.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Split the size/index bits between opc2 (6-5) and opc1 (22-21).  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base selector bits; signed vs unsigned matters for sub-word
	   transfers because the core register value is extended.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit VFP format are legal.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16138
static void
do_neon_rshift_round_imm (void)
{
  /* V{R}SHR by immediate.  The shift count is encoded as SIZE - imm.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16159
16160 static void
16161 do_neon_movl (void)
16162 {
16163 struct neon_type_el et = neon_check_type (2, NS_QD,
16164 N_EQK | N_DBL, N_SU_32 | N_KEY);
16165 unsigned sizebits = et.size >> 3;
16166 inst.instruction |= sizebits << 19;
16167 neon_two_same (0, et.type == NT_unsigned, -1);
16168 }
16169
16170 static void
16171 do_neon_trn (void)
16172 {
16173 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16174 struct neon_type_el et = neon_check_type (2, rs,
16175 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16176 NEON_ENCODE (INTEGER, inst);
16177 neon_two_same (neon_quad (rs), 1, et.size);
16178 }
16179
static void
do_neon_zip_uzp (void)
{
  /* VZIP / VUZP.  The 32-bit D-register forms are aliases of VTRN.32.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16195
16196 static void
16197 do_neon_sat_abs_neg (void)
16198 {
16199 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16200 struct neon_type_el et = neon_check_type (2, rs,
16201 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16202 neon_two_same (neon_quad (rs), 1, et.size);
16203 }
16204
16205 static void
16206 do_neon_pair_long (void)
16207 {
16208 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16209 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16210 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16211 inst.instruction |= (et.type == NT_unsigned) << 7;
16212 neon_two_same (neon_quad (rs), 1, et.size);
16213 }
16214
16215 static void
16216 do_neon_recip_est (void)
16217 {
16218 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16219 struct neon_type_el et = neon_check_type (2, rs,
16220 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16221 inst.instruction |= (et.type == NT_float) << 8;
16222 neon_two_same (neon_quad (rs), 1, et.size);
16223 }
16224
16225 static void
16226 do_neon_cls (void)
16227 {
16228 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16229 struct neon_type_el et = neon_check_type (2, rs,
16230 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16231 neon_two_same (neon_quad (rs), 1, et.size);
16232 }
16233
16234 static void
16235 do_neon_clz (void)
16236 {
16237 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16238 struct neon_type_el et = neon_check_type (2, rs,
16239 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16240 neon_two_same (neon_quad (rs), 1, et.size);
16241 }
16242
16243 static void
16244 do_neon_cnt (void)
16245 {
16246 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16247 struct neon_type_el et = neon_check_type (2, rs,
16248 N_EQK | N_INT, N_8 | N_KEY);
16249 neon_two_same (neon_quad (rs), 1, et.size);
16250 }
16251
16252 static void
16253 do_neon_swp (void)
16254 {
16255 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16256 neon_two_same (neon_quad (rs), 1, -1);
16257 }
16258
static void
do_neon_tbl_tbx (void)
{
  /* VTBL / VTBX: table lookup through a list of 1-4 D registers.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded biased by one, in bits 9-8.  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16282
static void
do_neon_ldm_stm (void)
{
  /* VLDM / VSTM of D registers.  P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register takes two words in the transfer count field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      /* An S-register list: hand off to the single-precision encoder.  */
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16312
16313 static void
16314 do_neon_ldr_str (void)
16315 {
16316 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16317
16318 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16319 And is UNPREDICTABLE in thumb mode. */
16320 if (!is_ldr
16321 && inst.operands[1].reg == REG_PC
16322 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16323 {
16324 if (thumb_mode)
16325 inst.error = _("Use of PC here is UNPREDICTABLE");
16326 else if (warn_on_deprecated)
16327 as_tsktsk (_("Use of PC here is deprecated"));
16328 }
16329
16330 if (inst.operands[0].issingle)
16331 {
16332 if (is_ldr)
16333 do_vfp_nsyn_opcode ("flds");
16334 else
16335 do_vfp_nsyn_opcode ("fsts");
16336 }
16337 else
16338 {
16339 if (is_ldr)
16340 do_vfp_nsyn_opcode ("fldd");
16341 else
16342 do_vfp_nsyn_opcode ("fstd");
16343 }
16344 }
16345
16346 /* "interleave" version also handles non-interleaving register VLD1/VST1
16347 instructions. */
16348
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  /* Bad element type; a diagnostic was already queued.  */
  if (et.type == NT_invtype)
    return;

  /* Map the :64/:128/:256 alignment qualifier (kept as bits, so imm >> 8
     recovers the value) onto the 2-bit align field; the wider alignments
     are only legal for sufficiently long register lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16414
16415 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16416 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16417 otherwise. The variable arguments are a list of pairs of legal (size, align)
16418 values, terminated with -1. */
16419
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment qualifier present: nothing to validate, no bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Walk the legal (size, align) pairs until a match or the -1 sentinel.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16455
static void
do_neon_ld_st_lane (void)
{
  /* Encode VLD<n>/VST<n> of a single structure to/from one lane.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, taken from bits 9-8 of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Validate alignment against the per-<n> legal pairs and compute the
     align-field bits.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16540
16541 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16542
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> (bits 9-8 of the initial bitmask): VLD1-VLD4.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A 2-register list sets the T bit (5).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 carries the register stride for VLD2-VLD4.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements at 128-bit alignment use the reserved-looking
	   size encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
16614
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
16617
16618 static void
16619 do_neon_ldx_stx (void)
16620 {
16621 if (inst.operands[1].isreg)
16622 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16623
16624 switch (NEON_LANE (inst.operands[0].imm))
16625 {
16626 case NEON_INTERLEAVE_LANES:
16627 NEON_ENCODE (INTERLV, inst);
16628 do_neon_ld_st_interleave ();
16629 break;
16630
16631 case NEON_ALL_LANES:
16632 NEON_ENCODE (DUP, inst);
16633 if (inst.instruction == N_INV)
16634 {
16635 first_error ("only loads support such operands");
16636 break;
16637 }
16638 do_neon_ld_dup ();
16639 break;
16640
16641 default:
16642 NEON_ENCODE (LANE, inst);
16643 do_neon_ld_st_lane ();
16644 }
16645
16646 /* L bit comes from bit mask. */
16647 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16648 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16649 inst.instruction |= inst.operands[1].reg << 16;
16650
16651 if (inst.operands[1].postind)
16652 {
16653 int postreg = inst.operands[1].imm & 0xf;
16654 constraint (!inst.operands[1].immisreg,
16655 _("post-index must be a register"));
16656 constraint (postreg == 0xd || postreg == 0xf,
16657 _("bad register for post-index"));
16658 inst.instruction |= postreg;
16659 }
16660 else
16661 {
16662 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16663 constraint (inst.reloc.exp.X_op != O_constant
16664 || inst.reloc.exp.X_add_number != 0,
16665 BAD_ADDR_MODE);
16666
16667 if (inst.operands[1].writeback)
16668 {
16669 inst.instruction |= 0xd;
16670 }
16671 else
16672 inst.instruction |= 0xf;
16673 }
16674
16675 if (thumb_mode)
16676 inst.instruction |= 0xf9000000;
16677 else
16678 inst.instruction |= 0xf4000000;
16679 }
16680
/* FP v8.  */

/* Common encoder for three-operand FP v8 VFP instructions (VSEL,
   VMAXNM/VMINNM scalar forms).  RS selects single (NS_FFF) or double
   (NS_DDD) precision.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  /* Encode operand registers for the chosen precision.  */
  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 distinguishes the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 instructions are unconditional (0xf condition field).  */
  inst.instruction |= 0xf0000000;
}
16703
16704 static void
16705 do_vsel (void)
16706 {
16707 set_it_insn_type (OUTSIDE_IT_INSN);
16708
16709 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16710 first_error (_("invalid instruction shape"));
16711 }
16712
16713 static void
16714 do_vmaxnm (void)
16715 {
16716 set_it_insn_type (OUTSIDE_IT_INSN);
16717
16718 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16719 return;
16720
16721 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16722 return;
16723
16724 neon_dyadic_misc (NT_untyped, N_F32, 0);
16725 }
16726
/* Common encoder for the VRINT family.  MODE selects the rounding
   behaviour; the VFP (scalar) encoding is preferred, with a Neon
   fallback for vector operands.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The explicit rounding-mode forms (a/n/p/m) are unconditional and
	 therefore disallowed inside IT blocks.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Fold the mode-specific opcode bits into the base encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks the double-precision variant.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding-mode field for the Neon encoding.  Note there is no
	 Neon equivalent of the FPSCR-mode (r) form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16809
/* VRINTX: round to integral, raising Inexact, using the FPSCR mode.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ: round to integral, toward zero.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR: round to integral, using the FPSCR rounding mode.  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA: round to integral, to nearest with ties away from zero.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN: round to integral, to nearest with ties to even.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP: round to integral, toward plus infinity.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM: round to integral, toward minus infinity.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16851
/* Crypto v1 instructions.  */

/* Common encoder for two-operand crypto instructions (AES*, SHA1H,
   SHA1SU1, SHA256SU0).  ELTTYPE is the required element type of the
   two Q-register operands; OP is placed in bits [7:6], or -1 to leave
   those bits untouched.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Crypto instructions are unconditional.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
16877
/* Common encoder for three-operand crypto instructions (SHA1*,
   SHA256*).  All three operands must be 32-bit Q registers; U and OP
   are folded into the encoding by neon_three_same.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16892
/* AESE: AES single round encryption.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: AES single round decryption.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: AES mix columns.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: AES inverse mix columns.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C: SHA1 hash update (choose).  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: SHA1 hash update (parity).  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: SHA1 hash update (majority).  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: SHA1 schedule update 0.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: SHA256 hash update part 1.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: SHA256 hash update part 2.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: SHA256 schedule update 1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: SHA1 fixed rotate.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: SHA1 schedule update 1.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: SHA256 schedule update 0.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16976
/* Common encoder for the CRC32 family.  POLY selects the polynomial
   (0: CRC32, 1: CRC32C) and SZ the access size (0: byte, 1: halfword,
   2: word).  Field placement differs between the ARM and Thumb
   encodings, hence the thumb_mode tests.  R15 (and, for Thumb, R13)
   operands are unpredictable, so warn rather than reject.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
16996
/* CRC32B: CRC-32 of a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC-32 of a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC-32 of a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC-32C of a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC-32C of a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC-32C of a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17032
17033 \f
17034 /* Overall per-instruction processing. */
17035
17036 /* We need to be able to fix up arbitrary expressions in some statements.
17037 This is so that we can handle symbols that are an arbitrary distance from
17038 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17039 which returns part of an address in a form which will be valid for
17040 a data instruction. We do this by pushing the expression into a symbol
17041 in the expr_section, and creating a fix for that. */
17042
/* Record a fixup: the value of EXP, SIZE bytes wide, is to be patched
   into FRAG at offset WHERE using relocation RELOC; PC_REL is non-zero
   for pc-relative fixups.  Expressions too complex for fix_new_exp are
   wrapped in an expression symbol first.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Arbitrary expression: push it into an expression symbol and fix
	 against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17096
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the relocation expression to the symbol + offset pair that
     frag_var records.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Reserve the maximum (ARM) size but emit only the Thumb halfword now;
     relaxation decides the final size later.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17128
17129 /* Write a 32-bit thumb instruction to buf. */
17130 static void
17131 put_thumb32_insn (char * buf, unsigned long insn)
17132 {
17133 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17134 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17135 }
17136
/* Emit the instruction assembled in INST into the current frag,
   reporting any recorded error against the source line STR.  Handles
   relaxable instructions, 16/32-bit Thumb and 32/64-bit ARM sizes, and
   attaches any pending relocation.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Size zero: nothing to emit (e.g. a pseudo handled elsewhere).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 64-bit ARM encodings repeat the same 32-bit word twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17183
17184 static char *
17185 output_it_inst (int cond, int mask, char * to)
17186 {
17187 unsigned long instruction = 0xbf00;
17188
17189 mask &= 0xf;
17190 instruction |= mask;
17191 instruction |= cond << 4;
17192
17193 if (to == NULL)
17194 {
17195 to = frag_more (2);
17196 #ifdef OBJ_ELF
17197 dwarf2_emit_insn (2);
17198 #endif
17199 }
17200
17201 md_number_to_chars (to, instruction, 2);
17202
17203 return to;
17204 }
17205
/* Tag values used in struct asm_opcode's tag field.  They describe
   where, if anywhere, a conditional affix may appear in the mnemonic;
   opcode_lookup () uses them to validate the affix it found.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17239
17240 /* Subroutine of md_assemble, responsible for looking up the primary
17241 opcode from the mnemonic the user wrote. STR points to the
17242 beginning of the mnemonic.
17243
17244 This is not simply a hash table lookup, because of conditional
17245 variants. Most instructions have conditional variants, which are
17246 expressed with a _conditional affix_ to the mnemonic. If we were
17247 to encode each conditional variant as a literal string in the opcode
17248 table, it would have approximately 20,000 entries.
17249
17250 Most mnemonics take this affix as a suffix, and in unified syntax,
17251 'most' is upgraded to 'all'. However, in the divided syntax, some
17252 instructions take the affix as an infix, notably the s-variants of
17253 the arithmetic instructions. Of those instructions, all but six
17254 have the infix appear after the third character of the mnemonic.
17255
17256 Accordingly, the algorithm for looking up primary opcodes given
17257 an identifier is:
17258
17259 1. Look up the identifier in the opcode table.
17260 If we find a match, go to step U.
17261
17262 2. Look up the last two characters of the identifier in the
17263 conditions table. If we find a match, look up the first N-2
17264 characters of the identifier in the opcode table. If we
17265 find a match, go to step CE.
17266
17267 3. Look up the fourth and fifth characters of the identifier in
17268 the conditions table. If we find a match, extract those
17269 characters from the identifier, and look up the remaining
17270 characters in the opcode table. If we find a match, go
17271 to step CM.
17272
17273 4. Fail.
17274
17275 U. Examine the tag field of the opcode structure, in case this is
17276 one of the six instructions with its conditional infix in an
17277 unusual place. If it is, the tag tells us where to find the
17278 infix; look it up in the conditions table and set inst.cond
17279 accordingly. Otherwise, this is an unconditional instruction.
17280 Again set inst.cond accordingly. Return the opcode structure.
17281
17282 CE. Examine the tag field to make sure this is an instruction that
17283 should receive a conditional suffix. If it is not, fail.
17284 Otherwise, set inst.cond from the suffix we already looked up,
17285 and return the opcode structure.
17286
17287 CM. Examine the tag field to make sure this is an instruction that
17288 should receive a conditional infix after the third character.
17289 If it is not, fail. Otherwise, undo the edits to the current
17290 line of input and proceed as for case CE. */
17291
17292 static const struct asm_opcode *
17293 opcode_lookup (char **str)
17294 {
17295 char *end, *base;
17296 char *affix;
17297 const struct asm_opcode *opcode;
17298 const struct asm_cond *cond;
17299 char save[2];
17300
17301 /* Scan up to the end of the mnemonic, which must end in white space,
17302 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17303 for (base = end = *str; *end != '\0'; end++)
17304 if (*end == ' ' || *end == '.')
17305 break;
17306
17307 if (end == base)
17308 return NULL;
17309
17310 /* Handle a possible width suffix and/or Neon type suffix. */
17311 if (end[0] == '.')
17312 {
17313 int offset = 2;
17314
17315 /* The .w and .n suffixes are only valid if the unified syntax is in
17316 use. */
17317 if (unified_syntax && end[1] == 'w')
17318 inst.size_req = 4;
17319 else if (unified_syntax && end[1] == 'n')
17320 inst.size_req = 2;
17321 else
17322 offset = 0;
17323
17324 inst.vectype.elems = 0;
17325
17326 *str = end + offset;
17327
17328 if (end[offset] == '.')
17329 {
17330 /* See if we have a Neon type suffix (possible in either unified or
17331 non-unified ARM syntax mode). */
17332 if (parse_neon_type (&inst.vectype, str) == FAIL)
17333 return NULL;
17334 }
17335 else if (end[offset] != '\0' && end[offset] != ' ')
17336 return NULL;
17337 }
17338 else
17339 *str = end;
17340
17341 /* Look for unaffixed or special-case affixed mnemonic. */
17342 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17343 end - base);
17344 if (opcode)
17345 {
17346 /* step U */
17347 if (opcode->tag < OT_odd_infix_0)
17348 {
17349 inst.cond = COND_ALWAYS;
17350 return opcode;
17351 }
17352
17353 if (warn_on_deprecated && unified_syntax)
17354 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17355 affix = base + (opcode->tag - OT_odd_infix_0);
17356 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17357 gas_assert (cond);
17358
17359 inst.cond = cond->value;
17360 return opcode;
17361 }
17362
17363 /* Cannot have a conditional suffix on a mnemonic of less than two
17364 characters. */
17365 if (end - base < 3)
17366 return NULL;
17367
17368 /* Look for suffixed mnemonic. */
17369 affix = end - 2;
17370 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17371 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17372 affix - base);
17373 if (opcode && cond)
17374 {
17375 /* step CE */
17376 switch (opcode->tag)
17377 {
17378 case OT_cinfix3_legacy:
17379 /* Ignore conditional suffixes matched on infix only mnemonics. */
17380 break;
17381
17382 case OT_cinfix3:
17383 case OT_cinfix3_deprecated:
17384 case OT_odd_infix_unc:
17385 if (!unified_syntax)
17386 return 0;
17387 /* else fall through */
17388
17389 case OT_csuffix:
17390 case OT_csuffixF:
17391 case OT_csuf_or_in3:
17392 inst.cond = cond->value;
17393 return opcode;
17394
17395 case OT_unconditional:
17396 case OT_unconditionalF:
17397 if (thumb_mode)
17398 inst.cond = cond->value;
17399 else
17400 {
17401 /* Delayed diagnostic. */
17402 inst.error = BAD_COND;
17403 inst.cond = COND_ALWAYS;
17404 }
17405 return opcode;
17406
17407 default:
17408 return NULL;
17409 }
17410 }
17411
17412 /* Cannot have a usual-position infix on a mnemonic of less than
17413 six characters (five would be a suffix). */
17414 if (end - base < 6)
17415 return NULL;
17416
17417 /* Look for infixed mnemonic in the usual position. */
17418 affix = base + 3;
17419 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17420 if (!cond)
17421 return NULL;
17422
17423 memcpy (save, affix, 2);
17424 memmove (affix, affix + 2, (end - affix) - 2);
17425 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17426 (end - base) - 2);
17427 memmove (affix + 2, affix, (end - affix) - 2);
17428 memcpy (affix, save, 2);
17429
17430 if (opcode
17431 && (opcode->tag == OT_cinfix3
17432 || opcode->tag == OT_cinfix3_deprecated
17433 || opcode->tag == OT_csuf_or_in3
17434 || opcode->tag == OT_cinfix3_legacy))
17435 {
17436 /* Step CM. */
17437 if (warn_on_deprecated && unified_syntax
17438 && (opcode->tag == OT_cinfix3
17439 || opcode->tag == OT_cinfix3_deprecated))
17440 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17441
17442 inst.cond = cond->value;
17443 return opcode;
17444 }
17445
17446 return NULL;
17447 }
17448
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions. Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* 0x18 is the mask for a single-instruction block; now_it_add_mask ()
     rewrites it as instructions are added.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now and remember where, so the mask can be
     patched in place as the block grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17469
17470 /* Close an automatic IT block.
17471 See comments in new_automatic_it_block (). */
17472
17473 static void
17474 close_automatic_it_block (void)
17475 {
17476 now_it.mask = 0x10;
17477 now_it.block_length = 0;
17478 }
17479
/* Update the mask of the current automatically-generated IT
   instruction. See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  /* The low bit of the condition determines whether the new slot
     executes on the block's condition or on its inverse.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record the then/else bit for the newly-added slot...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ...and move the terminating one-bit down a position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Patch the IT instruction that was emitted when the block opened.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17503
17504 /* The IT blocks handling machinery is accessed through the these functions:
17505 it_fsm_pre_encode () from md_assemble ()
17506 set_it_insn_type () optional, from the tencode functions
17507 set_it_insn_type_last () ditto
17508 in_it_block () ditto
17509 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
17511
17512 Rationale:
17513 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17514 initializing the IT insn type with a generic initial value depending
17515 on the inst.condition.
17516 2) During the tencode function, two things may happen:
17517 a) The tencode function overrides the IT insn type by
17518 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17519 b) The tencode function queries the IT block state by
17520 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17521
17522 Both set_it_insn_type and in_it_block run the internal FSM state
17523 handling function (handle_it_state), because: a) setting the IT insn
17524 type may incur in an invalid state (exiting the function),
17525 and b) querying the state requires the FSM to be updated.
17526 Specifically we want to avoid creating an IT block for conditional
17527 branches, so it_fsm_pre_encode is actually a guess and we can't
17528 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17530 Because of this, if set_it_insn_type and in_it_block have to be used,
17531 set_it_insn_type has to be called first.
17532
17533 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17534 determines the insn IT type depending on the inst.cond code.
17535 When a tencode () routine encodes an instruction that can be
17536 either outside an IT block, or, in the case of being inside, has to be
17537 the last one, set_it_insn_type_last () will determine the proper
17538 IT instruction type based on the inst.cond code. Otherwise,
17539 set_it_insn_type can be called for overriding that logic or
17540 for covering other cases.
17541
17542 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17544 still queried. Instead, if the FSM determines that the state should
17545 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17546 after the tencode () function: that's what it_fsm_post_encode () does.
17547
17548 Since in_it_block () calls the state handling function to get an
17549 updated state, an error may occur (due to invalid insns combination).
17550 In that case, inst.error is set.
17551 Therefore, inst.error has to be checked after the execution of
17552 the tencode () routine.
17553
17554 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17555 any pending state change (if any) that didn't take place in
17556 handle_it_state () as explained above. */
17557
17558 static void
17559 it_fsm_pre_encode (void)
17560 {
17561 if (inst.cond != COND_ALWAYS)
17562 inst.it_insn_type = INSIDE_IT_INSN;
17563 else
17564 inst.it_insn_type = OUTSIDE_IT_INSN;
17565
17566 now_it.state_handled = 0;
17567 }
17568
/* IT state FSM handling function.  Runs the IT-block state machine for
   the current instruction; returns SUCCESS, or FAIL with inst.error set
   when the instruction is illegal in the current IT context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn if the user asked for implicit-IT checking.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and (unless
		 this insn only conditionally belongs) open a new one.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* A last-insn type also closes the (possibly new) block.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	/* Mask reduced to the sentinel value: this is the final slot.  */
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17732
/* A pattern/mask pair identifying a class of 16-bit Thumb encodings,
   plus a translatable description used in deprecation diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Opcode bits identifying the class.  */
  unsigned long mask;		/* Which bits of PATTERN are significant.  */
  const char* description;	/* Human-readable class name for warnings.  */
};
17739
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode against inst.instruction.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Sentinel: mask == 0 terminates the scan.  */
};
17754
/* Run after an instruction has been encoded.  Ensure the IT state
   machine has processed the instruction, emit the ARMv8 IT-block
   deprecation warnings (at most once per block, via warn_deprecated),
   and retire the block once its last slot has been used.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      /* Values above 0xffff are 32-bit Thumb encodings (see the size
	 computation in md_assemble).  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit encoding: match against the deprecated-class table.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of exactly 0x10 means the last slot of the block was just
     consumed; leave the IT block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17808
17809 static void
17810 force_automatic_it_block_close (void)
17811 {
17812 if (now_it.state == AUTOMATIC_IT_BLOCK)
17813 {
17814 close_automatic_it_block ();
17815 now_it.state = OUTSIDE_IT_BLOCK;
17816 now_it.mask = 0;
17817 }
17818 }
17819
17820 static int
17821 in_it_block (void)
17822 {
17823 if (!now_it.state_handled)
17824 handle_it_state ();
17825
17826 return now_it.state != OUTSIDE_IT_BLOCK;
17827 }
17828
17829 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17830 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17831 here, hence the "known" in the function name. */
17832
17833 static bfd_boolean
17834 known_t32_only_insn (const struct asm_opcode *opcode)
17835 {
17836 /* Original Thumb-1 wide instruction. */
17837 if (opcode->tencode == do_t_blx
17838 || opcode->tencode == do_t_branch23
17839 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17840 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
17841 return TRUE;
17842
17843 /* Wide-only instruction added to ARMv8-M. */
17844 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m)
17845 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
17846 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
17847 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
17848 return TRUE;
17849
17850 return FALSE;
17851 }
17852
17853 /* Whether wide instruction variant can be used if available for a valid OPCODE
17854 in ARCH. */
17855
17856 static bfd_boolean
17857 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
17858 {
17859 if (known_t32_only_insn (opcode))
17860 return TRUE;
17861
17862 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17863 of variant T3 of B.W is checked in do_t_branch. */
17864 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
17865 && opcode->tencode == do_t_branch)
17866 return TRUE;
17867
17868 /* Wide instruction variants of all instructions with narrow *and* wide
17869 variants become available with ARMv6t2. Other opcodes are either
17870 narrow-only or wide-only and are thus available if OPCODE is valid. */
17871 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
17872 return TRUE;
17873
17874 /* OPCODE with narrow only instruction variant or wide variant not
17875 available. */
17876 return FALSE;
17877 }
17878
/* Main entry point for assembling one source line STR.  Looks up the
   mnemonic, validates it against the selected CPU/mode (Thumb vs ARM),
   parses operands, encodes the instruction through the per-opcode
   encoder, drives the IT FSM, records the architecture features used
   and finally emits the instruction via output_inst.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start each instruction from a clean slate.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Decide the instruction size from the final encoding: values
	     above 0xffff are 32-bit Thumb instructions.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      /* Fill in the condition field: 0xF for unconditional-F encodings,
	 otherwise the parsed condition.  */
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18068
/* Diagnose manual IT blocks left open at the end of assembly.  For ELF
   output, every section's saved per-segment IT state is inspected;
   otherwise only the global state is checked.  */

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18087
18088 /* Various frobbings of labels and their addresses. */
18089
/* Called at the start of each input line: forget the most recently
   seen label so md_assemble only realigns labels from the current
   line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18095
/* Hook run whenever a label SYM is defined: record it for md_assemble,
   mark its ARM/Thumb and interworking state, close any open automatic
   IT block, optionally flag it as a Thumb function, and emit DWARF
   line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any IT block we generated automatically.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18154
18155 bfd_boolean
18156 arm_data_in_code (void)
18157 {
18158 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18159 {
18160 *input_line_pointer = '/';
18161 input_line_pointer += 5;
18162 *input_line_pointer = 0;
18163 return TRUE;
18164 }
18165
18166 return FALSE;
18167 }
18168
18169 char *
18170 arm_canonicalize_symbol_name (char * name)
18171 {
18172 int len;
18173
18174 if (thumb_mode && (len = strlen (name)) > 5
18175 && streq (name + len - 5, "/data"))
18176 *(name + len - 5) = 0;
18177
18178 return name;
18179 }
18180 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* One reg_entry: name S, number N, type REG_TYPE_T, builtin.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Name is prefix P followed by number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the register value is doubled (Neon Q regs map onto
   D-register pairs).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half P16..P31 (extra VFP/Neon registers).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers with doubled values (see REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* lr_<bank>, sp_<bank> and spsr_<bank> entries (upper and lowercase)
   for one banked-register group starting at BASE.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18211
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.	 */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only these three helper macros are undefined here;
   REGNUM2, REGSET2, REGSETH and SPLRBANK remain defined — confirm
   nothing below relies on (or collides with) them.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18329
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each subset of the
   f/s/x/c flags is listed so any permutation parses.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18408
/* Table of V7M psr names, mapping each special-register name to its
   SYSm encoding value.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18429
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18440
/* Table of all explicit relocation names (ELF only), mapping the
   operand-level relocation specifier to its BFD relocation code.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18465
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs"/"cs" and "cc"/"ul"/"lo" are synonym pairs.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18485
/* One barrier option in both lowercase (L) and uppercase (U) spellings,
   with its 4-bit option CODE and the feature bit that enables it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DSB/DMB barrier option names; LD-variant options require
   ARMv8.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18511
18512 /* Table of ARM-format instructions. */
18513
18514 /* Macros for gluing together operand strings. N.B. In all cases
18515 other than OPS0, the trailing OP_stop comes from default
18516 zero-initialization of the unspecified elements of the array. */
18517 #define OPS0() { OP_stop, }
18518 #define OPS1(a) { OP_##a, }
18519 #define OPS2(a,b) { OP_##a,OP_##b, }
18520 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18521 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18522 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18523 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18524
18525 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18526 This is useful when mixing operands for ARM and THUMB, i.e. using the
18527 MIX_ARM_THUMB_OPERANDS macro.
18528 In order to use these macros, prefix the number of operands with _
18529 e.g. _3. */
18530 #define OPS_1(a) { a, }
18531 #define OPS_2(a,b) { a,b, }
18532 #define OPS_3(a,b,c) { a,b,c, }
18533 #define OPS_4(a,b,c,d) { a,b,c,d, }
18534 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18535 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18536
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expansion is one asm_opcode
   initializer: mnemonic, operand types, how a condition may be spelled
   (suffix/infix tag), ARM encoding, Thumb encoding, the architecture
   variants providing the instruction in each state, and the two
   encoding functions.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but tagged OT_cinfix3_deprecated so the infix spelling is
   treated as deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* Numeric Thumb opcode (TC3/TC3w) versus T_MNEM_xyz enumerator
   (tC3/tC3w), mirroring the TCE/tCE distinction above.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18568
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above: no Thumb opcode, no Thumb variant,
   NULL Thumb encoder.  Note that CE takes an already-quoted mnemonic
   whereas C3 stringizes its argument (its call sites pass a bare name).  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18601
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb encoding is the ARM one with the 0xE condition field made
   explicit (0xe##op), and both states share one encoding function.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM below: build one entry for the mnemonic m1<m2>m3, where
   the condition name m2 is stringized and pasted between the two halves
   of the base mnemonic.  An empty m2 stringizes to "" (sizeof == 1),
   giving the unconditional tag; otherwise the tag records where the
   infix sits, i.e. after sizeof (m1) - 1 characters.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one entry for the bare mnemonic plus one per condition
   spelling, including the synonym pairs hs/cs and cc/ul/lo.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
18644
/* ARM-only unconditional mnemonics, analogous to TUE/TUF but with no
   Thumb opcode or variant; these stringize the mnemonic argument.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes (opcode fields hold N_MNEM
   enumerators rather than encodings).  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The tag parameter selects plain (OT_csuffix) or
   F-variant (OT_csuffixF) suffix handling.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets table entries pass a literal 0 as the encoding function:
   do_##ae with ae == 0 expands to do_0, i.e. a null encoder
   (see e.g. the "cbnz"/"cbz" entries below).  */
#define do_0 0
18688
18689 static const struct asm_opcode insns[] =
18690 {
18691 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18692 #define THUMB_VARIANT & arm_ext_v4t
18693 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18694 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18695 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18696 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18697 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18698 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18699 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18700 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18701 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18702 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18703 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18704 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18705 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18706 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18707 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18708 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18709
18710 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18711 for setting PSR flag bits. They are obsolete in V6 and do not
18712 have Thumb equivalents. */
18713 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18714 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18715 CL("tstp", 110f000, 2, (RR, SH), cmp),
18716 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18717 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18718 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18719 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18720 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18721 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18722
18723 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18724 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
18725 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18726 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18727
18728 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18729 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18730 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18731 OP_RRnpc),
18732 OP_ADDRGLDR),ldst, t_ldst),
18733 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18734
18735 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18736 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18737 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18738 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18739 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18740 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18741
18742 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18743 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18744 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18745 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18746
18747 /* Pseudo ops. */
18748 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18749 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18750 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18751 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18752
18753 /* Thumb-compatibility pseudo ops. */
18754 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18755 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18756 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18757 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18758 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18759 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18760 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18761 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18762 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18763 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18764 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18765 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18766
18767 /* These may simplify to neg. */
18768 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18769 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18770
18771 #undef THUMB_VARIANT
18772 #define THUMB_VARIANT & arm_ext_v6
18773
18774 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18775
18776 /* V1 instructions with no Thumb analogue prior to V6T2. */
18777 #undef THUMB_VARIANT
18778 #define THUMB_VARIANT & arm_ext_v6t2
18779
18780 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18781 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18782 CL("teqp", 130f000, 2, (RR, SH), cmp),
18783
18784 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18785 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18786 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18787 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18788
18789 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18790 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18791
18792 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18793 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18794
18795 /* V1 instructions with no Thumb analogue at all. */
18796 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18797 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18798
18799 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18800 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18801 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18802 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18803 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18804 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18805 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18806 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18807
18808 #undef ARM_VARIANT
18809 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18810 #undef THUMB_VARIANT
18811 #define THUMB_VARIANT & arm_ext_v4t
18812
18813 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18814 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18815
18816 #undef THUMB_VARIANT
18817 #define THUMB_VARIANT & arm_ext_v6t2
18818
18819 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18820 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18821
18822 /* Generic coprocessor instructions. */
18823 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18824 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18825 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18826 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18827 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18828 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18829 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18830
18831 #undef ARM_VARIANT
18832 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18833
18834 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18835 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18836
18837 #undef ARM_VARIANT
18838 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18839 #undef THUMB_VARIANT
18840 #define THUMB_VARIANT & arm_ext_msr
18841
18842 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18843 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18844
18845 #undef ARM_VARIANT
18846 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18847 #undef THUMB_VARIANT
18848 #define THUMB_VARIANT & arm_ext_v6t2
18849
18850 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18851 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18852 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18853 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18854 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18855 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18856 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18857 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18858
18859 #undef ARM_VARIANT
18860 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18861 #undef THUMB_VARIANT
18862 #define THUMB_VARIANT & arm_ext_v4t
18863
18864 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18865 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18866 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18867 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18868 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18869 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18870
18871 #undef ARM_VARIANT
18872 #define ARM_VARIANT & arm_ext_v4t_5
18873
18874 /* ARM Architecture 4T. */
18875 /* Note: bx (and blx) are required on V5, even if the processor does
18876 not support Thumb. */
18877 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18878
18879 #undef ARM_VARIANT
18880 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v5t
18883
18884 /* Note: blx has 2 variants; the .value coded here is for
18885 BLX(2). Only this variant has conditional execution. */
18886 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18887 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18888
18889 #undef THUMB_VARIANT
18890 #define THUMB_VARIANT & arm_ext_v6t2
18891
18892 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18893 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18894 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18895 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18896 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18897 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18898 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18899 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18900
18901 #undef ARM_VARIANT
18902 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18903 #undef THUMB_VARIANT
18904 #define THUMB_VARIANT & arm_ext_v5exp
18905
18906 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18907 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18908 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18909 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18910
18911 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18912 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18913
18914 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18915 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18916 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18917 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18918
18919 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18920 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18921 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18922 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18923
18924 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18925 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18926
18927 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18928 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18929 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18930 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18931
18932 #undef ARM_VARIANT
18933 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18934 #undef THUMB_VARIANT
18935 #define THUMB_VARIANT & arm_ext_v6t2
18936
18937 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18938 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18939 ldrd, t_ldstd),
18940 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18941 ADDRGLDRS), ldrd, t_ldstd),
18942
18943 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18944 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18945
18946 #undef ARM_VARIANT
18947 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18948
18949 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18950
18951 #undef ARM_VARIANT
18952 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18953 #undef THUMB_VARIANT
18954 #define THUMB_VARIANT & arm_ext_v6
18955
18956 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18957 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18958 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18959 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18960 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18961 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18962 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18963 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18964 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18965 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18966
18967 #undef THUMB_VARIANT
18968 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18969
18970 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18971 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18972 strex, t_strex),
18973 #undef THUMB_VARIANT
18974 #define THUMB_VARIANT & arm_ext_v6t2
18975
18976 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18977 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18978
18979 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18980 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18981
18982 /* ARM V6 not included in V7M. */
18983 #undef THUMB_VARIANT
18984 #define THUMB_VARIANT & arm_ext_v6_notm
18985 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18986 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18987 UF(rfeib, 9900a00, 1, (RRw), rfe),
18988 UF(rfeda, 8100a00, 1, (RRw), rfe),
18989 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18990 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18991 UF(rfefa, 8100a00, 1, (RRw), rfe),
18992 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18993 UF(rfeed, 9900a00, 1, (RRw), rfe),
18994 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18995 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18996 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18997 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18998 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18999 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19000 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19001 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19002 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19003 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19004
19005 /* ARM V6 not included in V7M (eg. integer SIMD). */
19006 #undef THUMB_VARIANT
19007 #define THUMB_VARIANT & arm_ext_v6_dsp
19008 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19009 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19010 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19011 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19012 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19013 /* Old name for QASX. */
19014 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19015 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19016 /* Old name for QSAX. */
19017 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19018 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19019 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19020 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19021 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19022 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19023 /* Old name for SASX. */
19024 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19025 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19026 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19027 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19028 /* Old name for SHASX. */
19029 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19030 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19031 /* Old name for SHSAX. */
19032 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19033 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19034 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19035 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19036 /* Old name for SSAX. */
19037 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19038 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19039 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19040 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19041 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19042 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19043 /* Old name for UASX. */
19044 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19045 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19046 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19047 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19048 /* Old name for UHASX. */
19049 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19050 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19051 /* Old name for UHSAX. */
19052 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19053 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19054 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19055 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19056 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19057 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19058 /* Old name for UQASX. */
19059 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19060 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19061 /* Old name for UQSAX. */
19062 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19063 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19064 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19065 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19066 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19067 /* Old name for USAX. */
19068 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19069 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19070 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19071 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19072 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19073 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19074 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19075 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19076 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19077 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19078 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19079 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19080 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19081 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19082 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19083 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19084 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19085 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19086 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19087 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19088 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19089 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19090 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19091 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19092 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19093 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19094 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19095 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19096 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19097 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19098 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19099 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19100 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19101 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19102
19103 #undef ARM_VARIANT
19104 #define ARM_VARIANT & arm_ext_v6k
19105 #undef THUMB_VARIANT
19106 #define THUMB_VARIANT & arm_ext_v6k
19107
19108 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19109 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19110 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19111 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19112
19113 #undef THUMB_VARIANT
19114 #define THUMB_VARIANT & arm_ext_v6_notm
19115 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19116 ldrexd, t_ldrexd),
19117 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19118 RRnpcb), strexd, t_strexd),
19119
19120 #undef THUMB_VARIANT
19121 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19122 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19123 rd_rn, rd_rn),
19124 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19125 rd_rn, rd_rn),
19126 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19127 strex, t_strexbh),
19128 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19129 strex, t_strexbh),
19130 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19131
19132 #undef ARM_VARIANT
19133 #define ARM_VARIANT & arm_ext_sec
19134 #undef THUMB_VARIANT
19135 #define THUMB_VARIANT & arm_ext_sec
19136
19137 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19138
19139 #undef ARM_VARIANT
19140 #define ARM_VARIANT & arm_ext_virt
19141 #undef THUMB_VARIANT
19142 #define THUMB_VARIANT & arm_ext_virt
19143
19144 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19145 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19146
19147 #undef ARM_VARIANT
19148 #define ARM_VARIANT & arm_ext_pan
19149 #undef THUMB_VARIANT
19150 #define THUMB_VARIANT & arm_ext_pan
19151
19152 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19153
19154 #undef ARM_VARIANT
19155 #define ARM_VARIANT & arm_ext_v6t2
19156 #undef THUMB_VARIANT
19157 #define THUMB_VARIANT & arm_ext_v6t2
19158
19159 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19160 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19161 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19162 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19163
19164 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19165 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19166
19167 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19168 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19169 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19170 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19171
19172 #undef THUMB_VARIANT
19173 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19174 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19175 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19176
19177 /* Thumb-only instructions. */
19178 #undef ARM_VARIANT
19179 #define ARM_VARIANT NULL
19180 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19181 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19182
19183 /* ARM does not really have an IT instruction, so always allow it.
19184 The opcode is copied from Thumb in order to allow warnings in
19185 -mimplicit-it=[never | arm] modes. */
19186 #undef ARM_VARIANT
19187 #define ARM_VARIANT & arm_ext_v1
19188 #undef THUMB_VARIANT
19189 #define THUMB_VARIANT & arm_ext_v6t2
19190
19191 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19192 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19193 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19194 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19195 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19196 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19197 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19198 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19199 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19200 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19201 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19202 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19203 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19204 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19205 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19206 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19207 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19208 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19209
19210 /* Thumb2 only instructions. */
19211 #undef ARM_VARIANT
19212 #define ARM_VARIANT NULL
19213
19214 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19215 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19216 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19217 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19218 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19219 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19220
19221 /* Hardware division instructions. */
19222 #undef ARM_VARIANT
19223 #define ARM_VARIANT & arm_ext_adiv
19224 #undef THUMB_VARIANT
19225 #define THUMB_VARIANT & arm_ext_div
19226
19227 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19228 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19229
19230 /* ARM V6M/V7 instructions. */
19231 #undef ARM_VARIANT
19232 #define ARM_VARIANT & arm_ext_barrier
19233 #undef THUMB_VARIANT
19234 #define THUMB_VARIANT & arm_ext_barrier
19235
19236 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19237 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19238 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19239
19240 /* ARM V7 instructions. */
19241 #undef ARM_VARIANT
19242 #define ARM_VARIANT & arm_ext_v7
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_v7
19245
19246 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19247 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19248
19249 #undef ARM_VARIANT
19250 #define ARM_VARIANT & arm_ext_mp
19251 #undef THUMB_VARIANT
19252 #define THUMB_VARIANT & arm_ext_mp
19253
19254 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19255
19256 /* AArchv8 instructions. */
19257 #undef ARM_VARIANT
19258 #define ARM_VARIANT & arm_ext_v8
19259
19260 /* Instructions shared between armv8-a and armv8-m. */
19261 #undef THUMB_VARIANT
19262 #define THUMB_VARIANT & arm_ext_atomics
19263
19264 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19265 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19266 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19267 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19268 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19269 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19270 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19271 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19272 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19273 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19274 stlex, t_stlex),
19275 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19276 stlex, t_stlex),
19277 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19278 stlex, t_stlex),
19279 #undef THUMB_VARIANT
19280 #define THUMB_VARIANT & arm_ext_v8
19281
19282 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19283 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19284 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19285 ldrexd, t_ldrexd),
19286 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19287 strexd, t_strexd),
19288 /* ARMv8 T32 only. */
19289 #undef ARM_VARIANT
19290 #define ARM_VARIANT NULL
19291 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19292 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19293 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19294
19295 /* FP for ARMv8. */
19296 #undef ARM_VARIANT
19297 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19298 #undef THUMB_VARIANT
19299 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19300
19301 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19302 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19303 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19304 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19305 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19306 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19307 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19308 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19309 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19310 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19311 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19312 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19313 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19314 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19315 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19316 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19317 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19318
19319 /* Crypto v1 extensions. */
19320 #undef ARM_VARIANT
19321 #define ARM_VARIANT & fpu_crypto_ext_armv8
19322 #undef THUMB_VARIANT
19323 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19324
19325 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19326 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19327 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19328 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19329 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19330 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19331 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19332 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19333 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19334 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19335 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19336 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19337 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19338 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19339
19340 #undef ARM_VARIANT
19341 #define ARM_VARIANT & crc_ext_armv8
19342 #undef THUMB_VARIANT
19343 #define THUMB_VARIANT & crc_ext_armv8
19344 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19345 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19346 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19347 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19348 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19349 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19350
19351 /* ARMv8.2 RAS extension. */
19352 #undef ARM_VARIANT
19353 #define ARM_VARIANT & arm_ext_v8_2
19354 #undef THUMB_VARIANT
19355 #define THUMB_VARIANT & arm_ext_v8_2
19356 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19357
19358 #undef ARM_VARIANT
19359 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19360 #undef THUMB_VARIANT
19361 #define THUMB_VARIANT NULL
19362
19363 cCE("wfs", e200110, 1, (RR), rd),
19364 cCE("rfs", e300110, 1, (RR), rd),
19365 cCE("wfc", e400110, 1, (RR), rd),
19366 cCE("rfc", e500110, 1, (RR), rd),
19367
19368 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19369 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19370 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19371 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19372
19373 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19374 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19375 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19376 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19377
19378 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19379 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19380 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19381 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19382 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19383 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19384 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19385 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19386 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19387 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19388 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19389 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19390
19391 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19392 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19393 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19394 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19395 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19396 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19397 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19398 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19399 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19400 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19401 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19402 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19403
19404 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19405 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19406 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19407 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19408 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19409 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19410 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19411 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19412 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19413 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19414 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19415 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19416
19417 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19418 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19419 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19420 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19421 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19422 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19423 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19424 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19425 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19426 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19427 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19428 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19429
19430 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19431 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19432 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19433 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19434 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19435 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19436 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19437 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19438 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19439 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19440 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19441 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19442
19443 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19444 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19445 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19446 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19447 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19448 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19449 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19450 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19451 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19452 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19453 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19454 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19455
19456 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19457 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19458 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19459 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19460 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19461 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19462 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19463 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19464 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19465 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19466 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19467 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19468
19469 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19470 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19471 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19472 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19473 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19474 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19475 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19476 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19477 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19478 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19479 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19480 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19481
19482 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19483 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19484 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19485 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19486 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19487 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19488 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19489 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19490 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19491 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19492 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19493 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19494
19495 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19496 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19497 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19498 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19499 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19500 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19501 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19502 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19503 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19504 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19505 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19506 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19507
19508 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19509 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19510 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19511 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19512 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19513 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19514 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19515 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19516 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19517 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19518 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19519 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19520
19521 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19522 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19523 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19524 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19525 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19526 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19527 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19528 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19529 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19530 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19531 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19532 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19533
19534 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19535 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19536 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19537 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19538 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19539 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19540 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19541 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19542 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19543 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19544 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19545 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19546
19547 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19548 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19549 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19550 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19551 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19552 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19553 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19554 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19555 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19556 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19557 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19558 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19559
19560 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19561 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19562 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19563 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19564 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19565 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19566 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19567 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19568 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19569 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19570 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19571 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19572
19573 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19574 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19575 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19576 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19577 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19578 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19579 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19580 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19581 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19582 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19583 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19584 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19585
19586 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19587 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19588 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19589 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19590 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19591 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19592 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19593 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19594 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19595 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19596 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19597 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19598
19599 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19600 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19601 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19602 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19603 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19604 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19605 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19606 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19607 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19608 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19609 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19610 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19611
19612 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19613 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19614 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19615 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19616 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19617 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19618 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19619 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19620 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19621 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19622 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19623 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19624
19625 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19626 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19627 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19628 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19629 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19630 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19631 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19632 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19633 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19634 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19635 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19636 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19637
19638 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19639 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19640 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19641 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19642 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19643 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19644 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19645 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19646 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19647 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19648 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19649 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19650
19651 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19652 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19653 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19654 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19655 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19656 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19657 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19658 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19659 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19660 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19661 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19662 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19663
19664 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19665 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19666 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19667 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19668 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19669 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19670 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19671 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19672 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19673 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19674 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19675 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19676
19677 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19678 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19679 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19680 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19681 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19682 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19683 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19684 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19685 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19686 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19687 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19688 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19689
19690 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19691 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19692 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19693 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19694 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19695 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19696 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19697 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19698 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19699 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19700 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19701 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19702
19703 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19704 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19705 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19706 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19707 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19708 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19709 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19710 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19711 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19712 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19713 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19714 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19715
19716 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19717 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19718 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19719 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19720 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19721 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19722 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19723 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19724 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19725 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19726 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19727 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19728
19729 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19730 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19731 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19732 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19733 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19734 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19735 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19736 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19737 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19738 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19739 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19740 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19741
19742 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19743 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19744 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19745 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19746 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19747 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19748 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19749 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19750 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19751 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19752 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19753 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19754
19755 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19756 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19757 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19758 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19759
19760 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19761 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19762 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19763 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19764 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19765 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19766 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19767 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19768 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19769 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19770 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19771 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19772
19773 /* The implementation of the FIX instruction is broken on some
19774 assemblers, in that it accepts a precision specifier as well as a
19775 rounding specifier, despite the fact that this is meaningless.
19776 To be more compatible, we accept it as well, though of course it
19777 does not set any bits. */
19778 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19779 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19780 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19781 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19782 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19783 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19784 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19785 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19786 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19787 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19788 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19789 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19790 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19791
19792 /* Instructions that were new with the real FPA, call them V2. */
19793 #undef ARM_VARIANT
19794 #define ARM_VARIANT & fpu_fpa_ext_v2
19795
19796 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19797 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19798 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19799 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19800 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19801 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19802
19803 #undef ARM_VARIANT
19804 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19805
19806 /* Moves and type conversions. */
19807 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19808 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19809 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19810 cCE("fmstat", ef1fa10, 0, (), noargs),
19811 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19812 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19813 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19814 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19815 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19816 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19817 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19818 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19819 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19820 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19821
19822 /* Memory operations. */
19823 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19824 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19825 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19826 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19827 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19828 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19829 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19830 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19831 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19832 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19833 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19834 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19835 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19836 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19837 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19838 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19839 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19840 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19841
19842 /* Monadic operations. */
19843 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19844 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19845 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19846
19847 /* Dyadic operations. */
19848 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19849 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19850 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19851 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19852 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19853 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19854 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19855 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19856 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19857
19858 /* Comparisons. */
19859 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19860 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19861 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19862 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19863
19864 /* Double precision load/store are still present on single precision
19865 implementations. */
19866 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19867 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19868 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19869 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19870 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19871 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19872 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19873 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19874 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19875 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19876
19877 #undef ARM_VARIANT
19878 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19879
19880 /* Moves and type conversions. */
19881 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19882 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19883 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19884 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19885 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19886 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19887 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19888 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19889 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19890 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19891 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19892 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19893 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19894
19895 /* Monadic operations. */
19896 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19897 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19898 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19899
19900 /* Dyadic operations. */
19901 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19902 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19903 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19904 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19905 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19906 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19907 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19908 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19909 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19910
19911 /* Comparisons. */
19912 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19913 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19914 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19915 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19916
19917 #undef ARM_VARIANT
19918 #define ARM_VARIANT & fpu_vfp_ext_v2
19919
19920 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19921 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19922 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19923 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19924
19925 /* Instructions which may belong to either the Neon or VFP instruction sets.
19926 Individual encoder functions perform additional architecture checks. */
19927 #undef ARM_VARIANT
19928 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19929 #undef THUMB_VARIANT
19930 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19931
19932 /* These mnemonics are unique to VFP. */
19933 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19934 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19935 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19936 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19937 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19938 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19939 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19940 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19941 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19942 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19943
19944 /* Mnemonics shared by Neon and VFP. */
19945 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19946 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19947 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19948
19949 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19950 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19951
19952 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19953 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19954
19955 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19956 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19957 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19958 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19959 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19960 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19961 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19962 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19963
19964 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19965 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19966 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19967 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19968
19969
19970 /* NOTE: All VMOV encoding is special-cased! */
19971 NCE(vmov, 0, 1, (VMOV), neon_mov),
19972 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19973
19974 #undef THUMB_VARIANT
19975 #define THUMB_VARIANT & fpu_neon_ext_v1
19976 #undef ARM_VARIANT
19977 #define ARM_VARIANT & fpu_neon_ext_v1
19978
19979 /* Data processing with three registers of the same length. */
19980 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19981 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19982 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19983 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19984 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19985 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19986 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19987 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19988 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19989 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19990 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19991 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19992 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19993 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19994 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19995 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19996 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19997 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19998 /* If not immediate, fall back to neon_dyadic_i64_su.
19999 shl_imm should accept I8 I16 I32 I64,
20000 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20001 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20002 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20003 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20004 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20005 /* Logic ops, types optional & ignored. */
20006 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20007 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20008 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20009 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20010 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20011 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20012 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20013 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20014 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20015 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20016 /* Bitfield ops, untyped. */
20017 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20018 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20019 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20020 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20021 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20022 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20023 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20024 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20025 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20026 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20027 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20028 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20029 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20030 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20031 back to neon_dyadic_if_su. */
20032 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20033 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20034 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20035 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20036 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20037 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20038 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20039 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20040 /* Comparison. Type I8 I16 I32 F32. */
20041 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20042 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20043 /* As above, D registers only. */
20044 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20045 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20046 /* Int and float variants, signedness unimportant. */
20047 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20048 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20049 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20050 /* Add/sub take types I8 I16 I32 I64 F32. */
20051 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20052 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20053 /* vtst takes sizes 8, 16, 32. */
20054 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20055 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20056 /* VMUL takes I8 I16 I32 F32 P8. */
20057 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20058 /* VQD{R}MULH takes S16 S32. */
20059 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20060 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20061 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20062 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20063 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20064 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20065 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20066 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20067 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20068 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20069 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20070 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20071 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20072 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20073 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20074 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20075 /* ARM v8.1 extension. */
20076 nUF(vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20077 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20078 nUF(vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20079 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20080
20081 /* Two address, int/float. Types S8 S16 S32 F32. */
20082 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20083 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20084
20085 /* Data processing with two registers and a shift amount. */
20086 /* Right shifts, and variants with rounding.
20087 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20088 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20089 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20090 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20091 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20092 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20093 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20094 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20095 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20096 /* Shift and insert. Sizes accepted 8 16 32 64. */
20097 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20098 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20099 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20100 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20101 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20102 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20103 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20104 /* Right shift immediate, saturating & narrowing, with rounding variants.
20105 Types accepted S16 S32 S64 U16 U32 U64. */
20106 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20107 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20108 /* As above, unsigned. Types accepted S16 S32 S64. */
20109 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20110 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20111 /* Right shift narrowing. Types accepted I16 I32 I64. */
20112 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20113 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20114 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20115 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20116 /* CVT with optional immediate for fixed-point variant. */
20117 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20118
20119 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20120 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20121
20122 /* Data processing, three registers of different lengths. */
20123 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20124 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20125 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20126 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20127 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20128 /* If not scalar, fall back to neon_dyadic_long.
20129 Vector types as above, scalar types S16 S32 U16 U32. */
20130 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20131 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20132 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20133 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20134 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20135 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20136 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20137 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20138 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20139 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20140 /* Saturating doubling multiplies. Types S16 S32. */
20141 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20142 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20143 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20144 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20145 S16 S32 U16 U32. */
20146 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20147
20148 /* Extract. Size 8. */
20149 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20150 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20151
20152 /* Two registers, miscellaneous. */
20153 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20154 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20155 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20156 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20157 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20158 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20159 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20160 /* Vector replicate. Sizes 8 16 32. */
20161 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20162 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20163 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20164 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20165 /* VMOVN. Types I16 I32 I64. */
20166 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20167 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20168 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20169 /* VQMOVUN. Types S16 S32 S64. */
20170 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20171 /* VZIP / VUZP. Sizes 8 16 32. */
20172 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20173 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20174 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20175 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20176 /* VQABS / VQNEG. Types S8 S16 S32. */
20177 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20178 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20179 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20180 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20181 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20182 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20183 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20184 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20185 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20186 /* Reciprocal estimates. Types U32 F32. */
20187 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20188 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20189 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20190 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20191 /* VCLS. Types S8 S16 S32. */
20192 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20193 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20194 /* VCLZ. Types I8 I16 I32. */
20195 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20196 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20197 /* VCNT. Size 8. */
20198 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20199 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20200 /* Two address, untyped. */
20201 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20202 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20203 /* VTRN. Sizes 8 16 32. */
20204 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20205 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20206
20207 /* Table lookup. Size 8. */
20208 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20209 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20210
20211 #undef THUMB_VARIANT
20212 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20213 #undef ARM_VARIANT
20214 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20215
20216 /* Neon element/structure load/store. */
20217 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20218 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20219 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20220 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20221 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20222 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20223 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20224 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20225
20226 #undef THUMB_VARIANT
20227 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20228 #undef ARM_VARIANT
20229 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20230 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20231 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20232 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20233 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20234 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20235 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20236 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20237 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20238 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20239
20240 #undef THUMB_VARIANT
20241 #define THUMB_VARIANT & fpu_vfp_ext_v3
20242 #undef ARM_VARIANT
20243 #define ARM_VARIANT & fpu_vfp_ext_v3
20244
20245 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20246 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20247 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20248 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20249 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20250 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20251 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20252 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20253 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20254
20255 #undef ARM_VARIANT
20256 #define ARM_VARIANT & fpu_vfp_ext_fma
20257 #undef THUMB_VARIANT
20258 #define THUMB_VARIANT & fpu_vfp_ext_fma
20259 /* Mnemonics shared by Neon and VFP. These are included in the
20260 VFP FMA variant; NEON and VFP FMA always includes the NEON
20261 FMA instructions. */
20262 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20263 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20264 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20265 the v form should always be used. */
20266 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20267 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20268 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20269 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20270 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20271 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20272
20273 #undef THUMB_VARIANT
20274 #undef ARM_VARIANT
20275 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20276
20277 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20278 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20279 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20280 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20281 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20282 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20283 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20284 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20285
20286 #undef ARM_VARIANT
20287 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20288
20289 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20290 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20291 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20292 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20293 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20294 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20295 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20296 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20297 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20298 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20299 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20300 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20301 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20302 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20303 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20304 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20305 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20306 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20307 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20308 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20309 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20310 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20311 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20312 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20313 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20314 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20315 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20316 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20317 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20318 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20319 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20320 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20321 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20322 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20323 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20324 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20325 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20326 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20327 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20328 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20329 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20330 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20331 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20332 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20333 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20334 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20335 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20336 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20337 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20338 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20339 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20340 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20341 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20342 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20343 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20344 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20345 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20346 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20347 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20348 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20349 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20350 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20351 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20352 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20353 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20354 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20355 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20356 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20357 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20358 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20359 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20360 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20361 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20362 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20363 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20364 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20365 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20366 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20367 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20368 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20369 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20370 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20371 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20372 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20373 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20374 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20375 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20376 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20377 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20378 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20379 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20380 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20381 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20382 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20383 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20384 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20385 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20386 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20387 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20388 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20389 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20390 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20391 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20392 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20393 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20394 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20395 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20396 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20397 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20398 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20399 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20400 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20401 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20402 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20403 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20404 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20405 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20406 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20407 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20408 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20409 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20410 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20411 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20412 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20413 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20414 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20415 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20416 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20417 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20418 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20419 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20420 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20421 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20422 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20423 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20424 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20425 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20426 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20427 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20428 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20429 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20430 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20431 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20432 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20433 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20434 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20435 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20436 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20437 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20438 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20439 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20440 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20441 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20442 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20443 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20444 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20445 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20446 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20447 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20448 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20449 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20450 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20451
20452 #undef ARM_VARIANT
20453 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20454
20455 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20456 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20457 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20458 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20459 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20460 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20461 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20462 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20463 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20464 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20465 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20466 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20467 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20468 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20469 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20470 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20471 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20472 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20473 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20474 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20475 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20476 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20477 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20478 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20479 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20480 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20481 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20482 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20483 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20484 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20485 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20486 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20487 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20488 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20489 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20490 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20491 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20492 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20493 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20494 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20495 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20496 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20497 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20498 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20499 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20500 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20501 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20502 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20503 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20504 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20505 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20506 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20507 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20508 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20509 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20510 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20511 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20512
20513 #undef ARM_VARIANT
20514 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20515
20516 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20517 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20518 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20519 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20520 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20521 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20522 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20523 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20524 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20525 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20526 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20527 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20528 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20529 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20530 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20531 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20532 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20533 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20534 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20535 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20536 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20537 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20538 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20539 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20540 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20541 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20542 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20543 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20544 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20545 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20546 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20547 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20548 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20549 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20550 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20551 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20552 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20553 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20554 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20555 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20556 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20557 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20558 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20559 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20560 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20561 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20562 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20563 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20564 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20565 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20566 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20567 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20568 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20569 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20570 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20571 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20572 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20573 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20574 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20575 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20576 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20577 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20578 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20579 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20580 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20581 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20582 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20583 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20584 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20585 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20586 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20587 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20588 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20589 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20590 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20591 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20592
20593 #undef ARM_VARIANT
20594 #define ARM_VARIANT NULL
20595 #undef THUMB_VARIANT
20596 #define THUMB_VARIANT & arm_ext_v8m
20597 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20598 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20599 };
20600 #undef ARM_VARIANT
20601 #undef THUMB_VARIANT
20602 #undef TCE
20603 #undef TUE
20604 #undef TUF
20605 #undef TCC
20606 #undef cCE
20607 #undef cCL
20608 #undef C3E
20609 #undef CE
20610 #undef CM
20611 #undef UE
20612 #undef UF
20613 #undef UT
20614 #undef NUF
20615 #undef nUF
20616 #undef NCE
20617 #undef nCE
20618 #undef OPS0
20619 #undef OPS1
20620 #undef OPS2
20621 #undef OPS3
20622 #undef OPS4
20623 #undef OPS5
20624 #undef OPS6
20625 #undef do_0
20626 \f
20627 /* MD interface: bits in the object file. */
20628
20629 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20630 for use in the a.out file, and stores them in the array pointed to by buf.
20631 This knows about the endian-ness of the target machine and does
20632 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20633 2 (short) and 4 (long) Floating numbers are put out as a series of
20634 LITTLENUMS (shorts, here at least). */
20635
20636 void
20637 md_number_to_chars (char * buf, valueT val, int n)
20638 {
20639 if (target_big_endian)
20640 number_to_chars_bigendian (buf, val, n);
20641 else
20642 number_to_chars_littleendian (buf, val, n);
20643 }
20644
20645 static valueT
20646 md_chars_to_number (char * buf, int n)
20647 {
20648 valueT result = 0;
20649 unsigned char * where = (unsigned char *) buf;
20650
20651 if (target_big_endian)
20652 {
20653 while (n--)
20654 {
20655 result <<= 8;
20656 result |= (*where++ & 255);
20657 }
20658 }
20659 else
20660 {
20661 while (n--)
20662 {
20663 result <<= 8;
20664 result |= (where[n] & 255);
20665 }
20666 }
20667
20668 return result;
20669 }
20670
20671 /* MD interface: Sections. */
20672
20673 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20674 that an rs_machine_dependent frag may reach. */
20675
20676 unsigned int
20677 arm_frag_max_var (fragS *fragp)
20678 {
20679 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20680 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20681
20682 Note that we generate relaxable instructions even for cases that don't
20683 really need it, like an immediate that's a trivial constant. So we're
20684 overestimating the instruction size for some of those cases. Rather
20685 than putting more intelligence here, it would probably be better to
20686 avoid generating a relaxation frag in the first place when it can be
20687 determined up front that a short instruction will suffice. */
20688
20689 gas_assert (fragp->fr_type == rs_machine_dependent);
20690 return INSN_SIZE;
20691 }
20692
20693 /* Estimate the size of a frag before relaxing. Assume everything fits in
20694 2 bytes. */
20695
20696 int
20697 md_estimate_size_before_relax (fragS * fragp,
20698 segT segtype ATTRIBUTE_UNUSED)
20699 {
20700 fragp->fr_var = 2;
20701 return 2;
20702 }
20703
/* Convert a machine dependent frag.  Called once relaxation has settled
   on a final size (fragp->fr_var == 2 for the narrow Thumb encoding,
   4 for the wide Thumb-2 encoding).  For the wide case the original
   16-bit instruction at the frag is read back, its register fields are
   transplanted into the corresponding 32-bit encoding, and the 32-bit
   instruction is written out.  In both cases a fixup carrying the
   symbol/offset expression is attached so the immediate field is filled
   in later.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* Start of the variable part of the frag: the instruction itself.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The 16-bit instruction emitted before relaxation; its register
     fields are reused when widening to 32 bits.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* The frag subtype records which mnemonic was relaxed.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* The register fields sit in different places depending on
	     the narrow encoding (opcode bits 15:12 of 4 or 9 are the
	     SP/PC-relative forms).  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC; compensate.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS place Rd at bit 0 of the narrow encoding; CMP/CMN
	     place Rn at bit 8.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across to the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting form, which uses a
	     different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed.  */
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20877
20878 /* Return the size of a relaxable immediate operand instruction.
20879 SHIFT and SIZE specify the form of the allowable immediate. */
20880 static int
20881 relax_immediate (fragS *fragp, int size, int shift)
20882 {
20883 offsetT offset;
20884 offsetT mask;
20885 offsetT low;
20886
20887 /* ??? Should be able to do better than this. */
20888 if (fragp->fr_symbol)
20889 return 4;
20890
20891 low = (1 << shift) - 1;
20892 mask = (1 << (shift + size)) - (1 << shift);
20893 offset = fragp->fr_offset;
20894 /* Force misaligned offsets to 32-bit variant. */
20895 if (offset & low)
20896 return 4;
20897 if (offset & ~mask)
20898 return 4;
20899 return 2;
20900 }
20901
/* Get the address of a symbol during relaxation.  Returns the value of
   FRAGP's symbol plus FRAGP's fr_offset, compensated by STRETCH (the
   cumulative size change of this relaxation pass) when the symbol's
   frag has not yet been processed on this pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch: round it towards zero to the alignment
		 boundary recorded in fr_offset (a power-of-two
		 exponent).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once fully absorbed there is nothing left to apply.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the remaining stretch if the symbol's frag actually
	 lies ahead of us (f == NULL means it was not found forward).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20951
20952 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20953 load. */
20954 static int
20955 relax_adr (fragS *fragp, asection *sec, long stretch)
20956 {
20957 addressT addr;
20958 offsetT val;
20959
20960 /* Assume worst case for symbols not known to be in the same section. */
20961 if (fragp->fr_symbol == NULL
20962 || !S_IS_DEFINED (fragp->fr_symbol)
20963 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20964 || S_IS_WEAK (fragp->fr_symbol))
20965 return 4;
20966
20967 val = relaxed_symbol_addr (fragp, stretch);
20968 addr = fragp->fr_address + fragp->fr_fix;
20969 addr = (addr + 4) & ~3;
20970 /* Force misaligned targets to 32-bit variant. */
20971 if (val & 3)
20972 return 4;
20973 val -= addr;
20974 if (val < 0 || val > 1020)
20975 return 4;
20976 return 2;
20977 }
20978
20979 /* Return the size of a relaxable add/sub immediate instruction. */
20980 static int
20981 relax_addsub (fragS *fragp, asection *sec)
20982 {
20983 char *buf;
20984 int op;
20985
20986 buf = fragp->fr_literal + fragp->fr_fix;
20987 op = bfd_get_16(sec->owner, buf);
20988 if ((op & 0xf) == ((op >> 4) & 0xf))
20989 return relax_immediate (fragp, 8, 0);
20990 else
20991 return relax_immediate (fragp, 3, 0);
20992 }
20993
20994 /* Return TRUE iff the definition of symbol S could be pre-empted
20995 (overridden) at link or load time. */
20996 static bfd_boolean
20997 symbol_preemptible (symbolS *s)
20998 {
20999 /* Weak symbols can always be pre-empted. */
21000 if (S_IS_WEAK (s))
21001 return TRUE;
21002
21003 /* Non-global symbols cannot be pre-empted. */
21004 if (! S_IS_EXTERNAL (s))
21005 return FALSE;
21006
21007 #ifdef OBJ_ELF
21008 /* In ELF, a global symbol can be marked protected, or private. In that
21009 case it can't be pre-empted (other definitions in the same link unit
21010 would violate the ODR). */
21011 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21012 return FALSE;
21013 #endif
21014
21015 /* Other global symbols might be pre-empted. */
21016 return TRUE;
21017 }
21018
21019 /* Return the size of a relaxable branch instruction. BITS is the
21020 size of the offset field in the narrow instruction. */
21021
21022 static int
21023 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21024 {
21025 addressT addr;
21026 offsetT val;
21027 offsetT limit;
21028
21029 /* Assume worst case for symbols not known to be in the same section. */
21030 if (!S_IS_DEFINED (fragp->fr_symbol)
21031 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21032 || S_IS_WEAK (fragp->fr_symbol))
21033 return 4;
21034
21035 #ifdef OBJ_ELF
21036 /* A branch to a function in ARM state will require interworking. */
21037 if (S_IS_DEFINED (fragp->fr_symbol)
21038 && ARM_IS_FUNC (fragp->fr_symbol))
21039 return 4;
21040 #endif
21041
21042 if (symbol_preemptible (fragp->fr_symbol))
21043 return 4;
21044
21045 val = relaxed_symbol_addr (fragp, stretch);
21046 addr = fragp->fr_address + fragp->fr_fix + 4;
21047 val -= addr;
21048
21049 /* Offset is a signed value *2 */
21050 limit = 1 << bits;
21051 if (val >= limit || val < -limit)
21052 return 4;
21053 return 2;
21054 }
21055
21056
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   mnemonic recorded in fr_subtype to the matching size estimator.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* 7-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21135
21136 /* Round up a section size to the appropriate boundary. */
21137
21138 valueT
21139 md_section_align (segT segment ATTRIBUTE_UNUSED,
21140 valueT size)
21141 {
21142 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21143 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21144 {
21145 /* For a.out, force the section size to be aligned. If we don't do
21146 this, BFD will align it for us, but it will not write out the
21147 final bytes of the section. This may be a bug in BFD, but it is
21148 easier to fix it here since that is how the other a.out targets
21149 work. */
21150 int align;
21151
21152 align = bfd_get_section_alignment (stdoutput, segment);
21153 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21154 }
21155 #endif
21156
21157 return size;
21158 }
21159
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero-pad up to the instruction
   alignment, then fill the rest with no-op instructions appropriate
   for the recorded ARM/Thumb mode and the selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM no-ops, indexed [has_v6k][big_endian]: MOV r0, r0 for pre-v6k,
     the architected NOP for v6k and later.  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Thumb no-ops, indexed [is_thumb2][big_endian].  */
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 NOP.W, indexed [big_endian].  */
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb mode.  On Thumb-2 capable targets prefer wide NOPs, with
	 a single narrow NOP to fix up 2-byte residues.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Sub-instruction-size residue: pad with zero bytes (marked as data
     via a mapping symbol on ELF).  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder.  */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21279
21280 /* Called from md_do_align. Used to create an alignment
21281 frag in a code section. */
21282
21283 void
21284 arm_frag_align_code (int n, int max)
21285 {
21286 char * p;
21287
21288 /* We assume that there will never be a requirement
21289 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21290 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21291 {
21292 char err_msg[128];
21293
21294 sprintf (err_msg,
21295 _("alignments greater than %d bytes not supported in .text sections."),
21296 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21297 as_fatal ("%s", err_msg);
21298 }
21299
21300 p = frag_var (rs_align_code,
21301 MAX_MEM_FOR_RS_ALIGN_CODE,
21302 1,
21303 (relax_substateT) max,
21304 (symbolS *) NULL,
21305 (offsetT) n,
21306 (char *) NULL);
21307 *p = 0;
21308 }
21309
21310 /* Perform target specific initialisation of a frag.
21311 Note - despite the name this initialisation is not done when the frag
21312 is created, but only when its type is assigned. A frag can be created
21313 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
21315
21316 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, and mark the
     recording as valid by setting MODE_RECORDED.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21323
21324 #else /* OBJ_ELF is defined. */
21325 void
21326 arm_init_frag (fragS * fragP, int max_chars)
21327 {
21328 int frag_thumb_mode;
21329
21330 /* If the current ARM vs THUMB mode has not already
21331 been recorded into this frag then do so now. */
21332 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21333 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21334
21335 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21336
21337 /* Record a mapping symbol for alignment frags. We will delete this
21338 later if the alignment ends up empty. */
21339 switch (fragP->fr_type)
21340 {
21341 case rs_align:
21342 case rs_align_test:
21343 case rs_fill:
21344 mapping_state_2 (MAP_DATA, max_chars);
21345 break;
21346 case rs_align_code:
21347 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21348 break;
21349 default:
21350 break;
21351 }
21352 }
21353
21354 /* When we change sections we need to issue a new mapping symbol. */
21355
void
arm_elf_change_section (void)
{
  /* Called on every section change.  If the new section is an ARM
     exception-index table (SHT_ARM_EXIDX) that has not yet been
     associated with a code section, link it to .text so downstream
     processing can find its companion code.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
21364
21365 int
21366 arm_elf_section_type (const char * str, size_t len)
21367 {
21368 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21369 return SHT_ARM_EXIDX;
21370
21371 return -1;
21372 }
21373 \f
21374 /* Code to deal with unwinding tables. */
21375
21376 static void add_unwind_adjustsp (offsetT);
21377
21378 /* Generate any deferred unwind frame offset. */
21379
21380 static void
21381 flush_pending_unwind (void)
21382 {
21383 offsetT offset;
21384
21385 offset = unwind.pending_offset;
21386 unwind.pending_offset = 0;
21387 if (offset != 0)
21388 add_unwind_adjustsp (offset);
21389 }
21390
21391 /* Add an opcode to this list for this function. Two-byte opcodes should
21392 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21393 order. */
21394
static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment before recording new opcodes.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  /* Any new opcode invalidates a previously recorded "sp restored"
     state.  */
  unwind.sp_restored = 0;

  /* Grow the opcode buffer if needed.  ARM_OPCODE_CHUNK_SIZE (8) is at
     least as large as any single opcode (currently two bytes), so one
     extra chunk is always sufficient.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
						     unwind.opcode_alloc);
      else
	unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant first; the list is built
     in reverse order, so they come out MSB first when the table entry
     is packed by create_unwind_entry.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
21421
21422 /* Add unwind opcodes to adjust the stack pointer. */
21423
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128; encodes
	 vsp += 0x204 + (uleb128 << 2).  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but last byte.  */
	  n++;
	}
      /* Add the insn.  The uleb128 bytes are added back-to-front and
	 the 0xb2 last because the opcode list is built in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: 0x3f is the maximal "vsp += (0x3f << 2) + 4"
	 (= 0x100), the second covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode 0x00-0x3f: vsp += (imm << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal "vsp -= 0x100" opcodes (0x7f)
	 until the remainder fits one 0x40-0x7f opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21483
21484 /* Finish the list of unwind opcodes for this function. */
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* A frame pointer is in use: account for the difference between
	 the frame size and the fp offset, then emit the "restore sp
	 from register" opcode (0x90 | reg per the ARM EHABI).  */
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
21503
21504
21505 /* Start an exception table entry. If idx is nonzero this is an index table
21506 entry. */
21507
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  /* Choose the section-name prefix and ELF section type: index tables
     get SHT_ARM_EXIDX, unwind data tables are plain SHT_PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix with the text section name
     appended; a plain ".text" contributes no suffix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once text sections use the "once" prefix, and only the
	 part after ".gnu.linkonce.t." forms the suffix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group: if the text section is in a section group,
     place the unwind section in the same group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21580
21581
21582 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21583 personality routine data. Returns zero, or the index table value for
21584 an inline entry. */
21585
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a .cantunwind frame; no unwind
	 table data is permitted for it.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 holds at most 3 opcode bytes inline; fall back to
	     routine 1 when there are more.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  The top
		 byte 0x80 marks an inline (compact) entry.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of words, and check the count fits the
     8-bit length field.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word (4-byte) aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine: first word is a 31-bit PC-relative
	 reference to it.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcode bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  /* Return 0: the entry is in the table, not inline.  */
  return 0;
}
21750
21751
21752 /* Initialize the DWARF-2 unwind information for this procedure. */
21753
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21759 #endif /* OBJ_ELF */
21760
21761 /* Convert REGNAME to a DWARF-2 register number. */
21762
int
tc_arm_regname_to_dw2regnum (char *regname)
{
  /* Core registers r0-r15 map directly to DWARF numbers 0-15.  */
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
  if (reg != FAIL)
    return reg;

  /* PR 16694: Allow VFP registers as well.  */
  /* Single-precision VFP registers are mapped to 64 + N.
     NOTE(review): this matches the legacy AADWARF s-register numbering;
     confirm against the DWARF-for-ARM ABI revision in use.  */
  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
  if (reg != FAIL)
    return 64 + reg;

  /* Double-precision VFP registers are mapped to 256 + N.  */
  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
  if (reg != FAIL)
    return reg + 256;

  /* Not a recognised register name.  */
  return -1;
}
21781
21782 #ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  /* Emit SYMBOL as a SIZE-byte section-relative (O_secrel) expression;
     used for DWARF debug offsets in PE object files.  */
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
21793 #endif
21794
21795 /* MD interface: Symbol and relocation handling. */
21796
21797 /* Return the address within the segment that a PC-relative fixup is
21798 relative to. For ARM, PC-relative fixups applied to instructions
21799 are generally relative to the location of the fixup plus 8 bytes.
21800 Thumb branches are offset by 4, and Thumb loads relative to PC
21801 require special handling. */
21802
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb loads relative to PC: +4 pipeline offset, then the PC is
	 word-aligned before use.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a locally-resolved branch to an ARM-mode function on a
	 v5T+ target (a BL that may become BLX), reinstate the real
	 base even if it was zeroed above, so the offset is computed
	 here in full.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21924
21925 static bfd_boolean flag_warn_syms = TRUE;
21926
21927 bfd_boolean
21928 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
21929 {
21930 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21931 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21932 does mean that the resulting code might be very confusing to the reader.
21933 Also this warning can be triggered if the user omits an operand before
21934 an immediate address, eg:
21935
21936 LDR =foo
21937
21938 GAS treats this as an assignment of the value of the symbol foo to a
21939 symbol LDR, and so (without this code) it will not issue any kind of
21940 warning or error message.
21941
21942 Note - ARM instructions are case-insensitive but the strings in the hash
21943 table are all stored in lower case, so we must first ensure that name is
21944 lower case too. */
21945 if (flag_warn_syms && arm_ops_hsh)
21946 {
21947 char * nbuf = strdup (name);
21948 char * p;
21949
21950 for (p = nbuf; *p; p++)
21951 *p = TOLOWER (*p);
21952 if (hash_find (arm_ops_hsh, nbuf) != NULL)
21953 {
21954 static struct hash_control * already_warned = NULL;
21955
21956 if (already_warned == NULL)
21957 already_warned = hash_new ();
21958 /* Only warn about the symbol once. To keep the code
21959 simple we let hash_insert do the lookup for us. */
21960 if (hash_insert (already_warned, name, NULL) == NULL)
21961 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
21962 }
21963 else
21964 free (nbuf);
21965 }
21966
21967 return FALSE;
21968 }
21969
21970 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21971 Otherwise we have no need to default values of symbols. */
21972
symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Lazily create the _GLOBAL_OFFSET_TABLE_ symbol the first time it is
     referenced.  The cheap two-character check avoids a full string
     comparison for most symbols.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, & zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  /* No other symbols are given default values.  */
  return NULL;
}
21995
21996 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21997 computed as two separate immediate values, added together. We
21998 already know that this value cannot be computed by just one ARM
21999 instruction. */
22000
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation; the first one that leaves a non-zero low
     byte supplies the first (low-part) immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The remainder of the rotated value must fit entirely within
	   one of the next three byte positions, so it can be encoded as
	   the second (high-part) immediate with a suitable rotation.  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Encode as 8-bit value plus rotation field: since I is even,
	   (i << 7) places i/2 in the 4-bit rotate field at bit 8.  */
	return (a & 0xff) | (i << 7);
      }

  /* Not expressible as two rotated immediates.  */
  return FAIL;
}
22034
22035 static int
22036 validate_offset_imm (unsigned int val, int hwse)
22037 {
22038 if ((hwse && val > 255) || val > 4095)
22039 return FAIL;
22040 return val;
22041 }
22042
22043 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22044 negative immediate constant by altering the instruction. A bit of
22045 a hack really.
22046 MOV <-> MVN
22047 AND <-> BIC
22048 ADC <-> SBC
22049 by inverting the second operand, and
22050 ADD <-> SUB
22051 CMP <-> CMN
22052 by negating the second operand. */
22053
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Rotated-immediate encodings of -VALUE and ~VALUE (FAIL if not
     representable).  Which one is usable depends on the opcode.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		  /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate
     encoding.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
22131
22132 /* Like negate_data_op, but for Thumb-2. */
22133
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Thumb-2 modified-immediate encodings of -VALUE and ~VALUE
     (FAIL if not representable).  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this is TST (AND with discarded result),
	 which cannot be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate
     encoding.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22207
22208 /* Read a 32-bit thumb instruction from buf. */
22209 static unsigned long
22210 get_thumb32_insn (char * buf)
22211 {
22212 unsigned long insn;
22213 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22214 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22215
22216 return insn;
22217 }
22218
22219
22220 /* We usually want to set the low bit on the address of thumb function
22221 symbols. In particular .word foo - . should have the low bit set.
22222 Generic code tries to fold the difference of two symbols to
22223 a constant. Prevent this and force a relocation when the first symbols
22224 is a thumb function. */
22225
bfd_boolean
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  /* For SYM1 - SYM2 where SYM1 is a Thumb function, fold the
     subtraction into L ourselves (keeping both symbols in the
     expression) so generic code does not reduce it to a constant and
     a relocation is still emitted.  */
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return TRUE;	/* Expression handled here.  */
    }

  /* Process as normal.  */
  return FALSE;
}
22243
22244 /* Encode Thumb2 unconditional branches and calls. The encoding
22245 for the 2 are identical for the immediate values. */
22246
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the 25-bit signed byte offset into the T32 encoding fields:
     S (sign), I1, I2, imm10 (hi) and imm11 (lo).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = ~(I1 ^ S) and J2 = ~(I2 ^ S); the
     final XOR with T2I1I2MASK performs the inversion of both bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22268
22269 void
22270 md_apply_fix (fixS * fixP,
22271 valueT * valP,
22272 segT seg)
22273 {
22274 offsetT value = * valP;
22275 offsetT newval;
22276 unsigned int newimm;
22277 unsigned long temp;
22278 int sign;
22279 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22280
22281 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22282
22283 /* Note whether this will delete the relocation. */
22284
22285 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22286 fixP->fx_done = 1;
22287
22288 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22289 consistency with the behaviour on 32-bit hosts. Remember value
22290 for emit_reloc. */
22291 value &= 0xffffffff;
22292 value ^= 0x80000000;
22293 value -= 0x80000000;
22294
22295 *valP = value;
22296 fixP->fx_addnumber = value;
22297
22298 /* Same treatment for fixP->fx_offset. */
22299 fixP->fx_offset &= 0xffffffff;
22300 fixP->fx_offset ^= 0x80000000;
22301 fixP->fx_offset -= 0x80000000;
22302
22303 switch (fixP->fx_r_type)
22304 {
22305 case BFD_RELOC_NONE:
22306 /* This will need to go in the object file. */
22307 fixP->fx_done = 0;
22308 break;
22309
22310 case BFD_RELOC_ARM_IMMEDIATE:
22311 /* We claim that this fixup has been processed here,
22312 even if in fact we generate an error because we do
22313 not have a reloc for it, so tc_gen_reloc will reject it. */
22314 fixP->fx_done = 1;
22315
22316 if (fixP->fx_addsy)
22317 {
22318 const char *msg = 0;
22319
22320 if (! S_IS_DEFINED (fixP->fx_addsy))
22321 msg = _("undefined symbol %s used as an immediate value");
22322 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22323 msg = _("symbol %s is in a different section");
22324 else if (S_IS_WEAK (fixP->fx_addsy))
22325 msg = _("symbol %s is weak and may be overridden later");
22326
22327 if (msg)
22328 {
22329 as_bad_where (fixP->fx_file, fixP->fx_line,
22330 msg, S_GET_NAME (fixP->fx_addsy));
22331 break;
22332 }
22333 }
22334
22335 temp = md_chars_to_number (buf, INSN_SIZE);
22336
22337 /* If the offset is negative, we should use encoding A2 for ADR. */
22338 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22339 newimm = negate_data_op (&temp, value);
22340 else
22341 {
22342 newimm = encode_arm_immediate (value);
22343
22344 /* If the instruction will fail, see if we can fix things up by
22345 changing the opcode. */
22346 if (newimm == (unsigned int) FAIL)
22347 newimm = negate_data_op (&temp, value);
22348 }
22349
22350 if (newimm == (unsigned int) FAIL)
22351 {
22352 as_bad_where (fixP->fx_file, fixP->fx_line,
22353 _("invalid constant (%lx) after fixup"),
22354 (unsigned long) value);
22355 break;
22356 }
22357
22358 newimm |= (temp & 0xfffff000);
22359 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22360 break;
22361
22362 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22363 {
22364 unsigned int highpart = 0;
22365 unsigned int newinsn = 0xe1a00000; /* nop. */
22366
22367 if (fixP->fx_addsy)
22368 {
22369 const char *msg = 0;
22370
22371 if (! S_IS_DEFINED (fixP->fx_addsy))
22372 msg = _("undefined symbol %s used as an immediate value");
22373 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22374 msg = _("symbol %s is in a different section");
22375 else if (S_IS_WEAK (fixP->fx_addsy))
22376 msg = _("symbol %s is weak and may be overridden later");
22377
22378 if (msg)
22379 {
22380 as_bad_where (fixP->fx_file, fixP->fx_line,
22381 msg, S_GET_NAME (fixP->fx_addsy));
22382 break;
22383 }
22384 }
22385
22386 newimm = encode_arm_immediate (value);
22387 temp = md_chars_to_number (buf, INSN_SIZE);
22388
22389 /* If the instruction will fail, see if we can fix things up by
22390 changing the opcode. */
22391 if (newimm == (unsigned int) FAIL
22392 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22393 {
22394 /* No ? OK - try using two ADD instructions to generate
22395 the value. */
22396 newimm = validate_immediate_twopart (value, & highpart);
22397
22398 /* Yes - then make sure that the second instruction is
22399 also an add. */
22400 if (newimm != (unsigned int) FAIL)
22401 newinsn = temp;
22402 /* Still No ? Try using a negated value. */
22403 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22404 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22405 /* Otherwise - give up. */
22406 else
22407 {
22408 as_bad_where (fixP->fx_file, fixP->fx_line,
22409 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22410 (long) value);
22411 break;
22412 }
22413
22414 /* Replace the first operand in the 2nd instruction (which
22415 is the PC) with the destination register. We have
22416 already added in the PC in the first instruction and we
22417 do not want to do it again. */
22418 newinsn &= ~ 0xf0000;
22419 newinsn |= ((newinsn & 0x0f000) << 4);
22420 }
22421
22422 newimm |= (temp & 0xfffff000);
22423 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22424
22425 highpart |= (newinsn & 0xfffff000);
22426 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22427 }
22428 break;
22429
    case BFD_RELOC_ARM_OFFSET_IMM:
      /* For RELA targets the addend stays in the relocation, so do not
	 patch it into the instruction.  */
      if (!fixP->fx_done && seg->use_rela_p)
	value = 0;
      /* Fall through.  */

    case BFD_RELOC_ARM_LITERAL:
      /* ARM load/store with a 12-bit unsigned offset plus a separate
	 U (up/down) bit; encode the magnitude and set INDEX_UP for
	 positive offsets.  */
      sign = value > 0;

      if (value < 0)
	value = - value;

      if (validate_offset_imm (value, 0) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	newval &= 0xfffff000;
      else
	{
	  /* Clear the old immediate and the U bit before merging.  */
	  newval &= 0xff7ff000;
	  newval |= value | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      /* Halfword/signed-byte forms: 8-bit offset split into two nibbles
	 (bits 8-11 and 0-3), again with a separate U bit.  */
      sign = value > 0;

      if (value < 0)
	value = - value;

      if (validate_offset_imm (value, 1) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for 8-bit offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	newval &= 0xfffff0f0;
      else
	{
	  newval &= 0xff7ff0f0;
	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_T32_OFFSET_U8:
      /* Thumb-2 8-bit unsigned offset, scaled by 4, in the second
	 halfword of the instruction.  */
      if (value < 0 || value > 1020 || value % 4 != 0)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;
22503
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
	 load/store instruction with immediate offset:

	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
	                                           *4, optional writeback(W)
	                                           (doubleword load/store)

	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

	 Uppercase letters indicate bits that are already encoded at
	 this point.  Lowercase letters are our problem.  For the
	 second block of instructions, the secondary opcode nybble
	 (bits 8..11) is present, and bit 23 is zero, even if this is
	 a PC-relative operation.  */
      /* Reassemble the full 32-bit instruction from its two halfwords.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
	{
	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
	  if (value >= 0)
	    newval |= (1 << 23);
	  else
	    value = -value;
	  if (value % 4 != 0)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset not a multiple of 4"));
	      break;
	    }
	  value /= 4;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x000f0000) == 0x000f0000)
	{
	  /* PC-relative, 12-bit offset.  */
	  if (value >= 0)
	    newval |= (1 << 23);
	  else
	    value = -value;
	  if (value > 0xfff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xfff;
	}
      else if ((newval & 0x00000100) == 0x00000100)
	{
	  /* Writeback: 8-bit, +/- offset.  */
	  if (value >= 0)
	    newval |= (1 << 9);
	  else
	    value = -value;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x00000f00) == 0x00000e00)
	{
	  /* T-instruction: positive 8-bit offset.  */
	  if (value < 0 || value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	  newval |= value;
	}
      else
	{
	  /* Positive 12-bit or negative 8-bit offset.  */
	  int limit;
	  if (value >= 0)
	    {
	      newval |= (1 << 23);
	      limit = 0xfff;
	    }
	  else
	    {
	      value = -value;
	      limit = 0xff;
	    }
	  if (value > limit)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~limit;
	}

      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_SHIFT_IMM:
      /* 5-bit shift amount in bits 7..11 of an ARM data-processing
	 instruction; bits 5..6 select the shift type.  A shift of 32 is
	 encoded as 0 and is only valid for LSR/ASR (types 01/10), hence
	 the extra check against types 00 (LSL) and 11 (ROR).  */
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
	  || (value == 32
	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("shift expression is too large"));
	  break;
	}

      if (value == 0)
	/* Shifts of zero must be done as lsl.  */
	newval &= ~0x60;
      else if (value == 32)
	value = 0;
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22639
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
	 even if in fact we generate an error because we do
	 not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
	  && ! S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  break;
	}

      /* Reassemble the full 32-bit instruction from its two halfwords.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	{
	  /* First try the Thumb-2 modified-immediate encoding; failing
	     that, try negating the data-processing opcode.  */
	  newimm = encode_thumb32_immediate (value);
	  if (newimm == (unsigned int) FAIL)
	    newimm = thumb32_negate_data_op (&newval, value);
	}
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
	  && newimm == (unsigned int) FAIL)
	{
	  /* Turn add/sub into addw/subw.  */
	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	    newval = (newval & 0xfeffffff) | 0x02000000;
	  /* No flat 12-bit imm encoding for addsw/subsw.  */
	  if ((newval & 0x00100000) == 0)
	    {
	      /* 12 bit immediate for addw/subw.  */
	      if (value < 0)
		{
		  value = -value;
		  newval ^= 0x00a00000;
		}
	      if (value > 0xfff)
		newimm = (unsigned int) FAIL;
	      else
		newimm = value;
	    }
	}

      if (newimm == (unsigned int)FAIL)
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid constant (%lx) after fixup"),
			(unsigned long) value);
	  break;
	}

      /* Splice the immediate back in as i:imm3:imm8.  */
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_SMC:
      /* 16-bit immediate, split across bits 0-3 and 8-19.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_HVC:
      /* Same split encoding as SMC.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid hvc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      /* Non-zero tc_fix_data selects the Thumb (8-bit comment field)
	 form; otherwise the ARM (24-bit) form.  */
      if (fixP->tc_fix_data != 0)
	{
	  if (((unsigned long) value) > 0xff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (((unsigned long) value) > 0x00ffffff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;

    case BFD_RELOC_ARM_MULTI:
      /* Register list for load/store multiple: low 16 bits.  */
      if (((unsigned long) value) > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
22754
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PCREL_CALL:

      /* A BL to a defined Thumb function in the same section can be
	 converted in place to BLX (bit 28) on v5T+.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	/* Flip the bl to blx. This is a simple flip
	   bit here because we generate PCREL_CALL for
	   unconditional bls.  */
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval = newval | 0x10000000;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 1;
	  fixP->fx_done = 1;
	}
      else
	temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  /* This would map to a bl<cond>, b<cond>,
	     b<always> to a Thumb function. We
	     need to force a relocation for this particular
	     case.  */
	  /* NOTE(review): NEWVAL is loaded here but not used on this
	     path - the store appears dead; confirm before removing.  */
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  fixP->fx_done = 0;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:

      temp = 1;
      /* A BLX to a defined ARM function need not change state; rewrite
	 it as BL and warn.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && ARM_IS_FUNC (fixP->fx_addsy))
	{
	  /* Flip the blx to a bl and warn.  */
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  newval = 0xeb000000;
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to '%s' an ARM ISA state function changed to bl"),
			 name);
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 3;
	  fixP->fx_done = 1;
	}

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
#endif

    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
      /* TEMP is the alignment mask: 1 for BLX (halfword target), 3 for
	 B/BL (word target).  */
      if (value & temp)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("misaligned branch destination"));
      if ((value & (offsetT)0xfe000000) != (offsetT)0
	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= (value >> 2) & 0x00ffffff;
	  /* Set the H bit on BLX instructions.  */
	  if (temp == 1)
	    {
	      if (value & 2)
		newval |= 0x01000000;
	      else
		newval &= ~0x01000000;
	    }
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;
22850
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
	 (which, strictly speaking, are prohibited) will be turned into
	 no-ops.

	 FIXME: It may be better to remove the instruction completely and
	 perform relaxation.  */
      if (value == -2)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval = 0xbf00; /* NOP encoding T1 */
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (value & ~0x7e)
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

	  if (fixP->fx_done || !seg->use_rela_p)
	    {
	      newval = md_chars_to_number (buf, THUMB_SIZE);
	      /* Split the offset into i (bit 9) and imm5 (bits 3-7).  */
	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
	      md_number_to_chars (buf, newval, THUMB_SIZE);
	    }
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      /* 9-bit signed offset, stored >> 1 in the low 8 bits.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0x1ff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      /* 12-bit signed offset, stored >> 1 in the low 11 bits.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0xfff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Force a relocation for a branch 20 bits wide.  */
	  fixP->fx_done = 0;
	}
      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  offsetT newval2;
	  addressT S, J1, J2, lo, hi;

	  /* Decompose the offset into the T32 S:J2:J1:imm6:imm11
	     branch fields.  */
	  S = (value & 0x00100000) >> 20;
	  J2 = (value & 0x00080000) >> 19;
	  J1 = (value & 0x00040000) >> 18;
	  hi = (value & 0x0003f000) >> 12;
	  lo = (value & 0x00000ffe) >> 1;

	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval |= (S << 10) | hi;
	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
	}
      break;
22937
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* If there is a blx from a thumb state function to
	 another thumb function flip this to a bl and warn
	 about it.  */

      if (fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
			 name);
	  /* Setting bit 12 of the second halfword turns BLX into BL.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval | 0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
	  fixP->fx_done = 1;
	}


      goto thumb_bl_common;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A bl from Thumb state ISA to an internal ARM state function
	 is converted to a blx.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Clearing bit 12 of the second halfword turns BL into BLX.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval & ~0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
	  fixP->fx_done = 1;
	}

    thumb_bl_common:

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	/* For a BLX instruction, make sure that the relocation is rounded up
	   to a word boundary.  This follows the semantics of the instruction
	   which specifies that bit 1 of the target address will come from bit
	   1 of the base address.  */
	value = (value + 3) & ~ 3;

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
	  && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#endif

      /* Range check: +/-4MB without v6T2, +/-16MB with it.  */
      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
	{
	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
	  else if ((value & ~0x1ffffff)
		   && ((value & ~0x1ffffff) != ~0x1ffffff))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("Thumb2 branch out of range"));
	}

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;
23016
    case BFD_RELOC_8:
      /* Simple 8-bit data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	*buf = value;
      break;

    case BFD_RELOC_16:
      /* Simple 16-bit data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      /* TLS relocations are resolved at link time; just mark the
	 symbol thread-local here.  */
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      break;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
      /* Left entirely to the linker.  */
      break;

    case BFD_RELOC_ARM_GOT_PREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_ARM_TARGET2:
      /* TARGET2 is not partial-inplace, so we need to write the
	 addend here for REL targets, because it won't be written out
	 during reloc processing later.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, fixP->fx_offset, 4);
      break;
#endif

    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
	/* For WinCE we only do this for pcrel fixups.  */
	if (fixP->fx_done || fixP->fx_pcrel)
#endif
	  md_number_to_chars (buf, value, 4);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PREL31:
      /* 31-bit PC-relative value; bit 31 of the original word is
	 preserved.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, 4) & 0x80000000;
	  if ((value ^ (value >> 1)) & 0x40000000)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("rel31 relocation overflow"));
	    }
	  newval |= value & 0x7fffffff;
	  md_number_to_chars (buf, newval, 4);
	}
      break;
#endif
23091
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Coprocessor load/store offset: 8-bit word offset (stored >> 2)
	 with a separate U bit, so the byte offset must be word-aligned
	 and within +/-1023.  */
      if (value < -1023 || value > 1023 || (value & 3))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
    cp_off_common:
      /* Shared tail, also reached via goto from the *_S2 cases below.  */
      sign = value > 0;
      if (value < 0)
	value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if (value == 0)
	newval &= 0xffffff00;
      else
	{
	  newval &= 0xff7fff00;
	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
	}
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	md_number_to_chars (buf, newval, INSN_SIZE);
      else
	put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      /* As above, but the operand is an unscaled word count; scale it
	 to bytes before sharing the common encoding path.  */
      if (value < -255 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
23127
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
	 on the type of instruction, we can establish this from the
	 top 4 bits.  */
      switch (newval >> 12)
	{
	case 4: /* PC load.  */
	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
	     forced to zero for these loads; md_pcrel_from has already
	     compensated for this.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, target not word aligned (0x%08lX)"),
			  (((unsigned long) fixP->fx_frag->fr_address
			    + (unsigned long) fixP->fx_where) & ~3)
			  + (unsigned long) value);

	  if (value & ~0x3fc)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);

	  newval |= value >> 2;
	  break;

	case 9: /* SP load/store.  */
	  if (value & ~0x3fc)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value >> 2;
	  break;

	case 6: /* Word load/store.  */
	  if (value & ~0x7c)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 4; /* 6 - 2.  */
	  break;

	case 7: /* Byte load/store.  */
	  if (value & ~0x1f)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 6;
	  break;

	case 8: /* Halfword load/store.  */
	  if (value & ~0x3e)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 5; /* 6 - 1.  */
	  break;

	default:
	  /* NOTE(review): unlike the sibling diagnostics this message is
	     not wrapped in _() for translation; confirm intent.  */
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			"Unable to process relocation for thumb opcode: %lx",
			(unsigned long) newval);
	  break;
	}
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
23194
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
	 the following immediate relocations:

	    3bit ADD/SUB
	    8bit ADD/SUB
	    9bit ADD/SUB SP word-aligned
	   10bit ADD PC/SP word-aligned

	 The type of instruction being processed is encoded in the
	 instruction field:

	   0x8000  SUB
	   0x00F0  Rd
	   0x000F  Rs
      */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
	int rd = (newval >> 4) & 0xf;
	int rs = newval & 0xf;
	int subtract = !!(newval & 0x8000);

	/* Check for HI regs, only very restricted cases allowed:
	   Adjusting SP, and using PC or SP to get an address.  */
	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid Hi register with immediate"));

	/* If value is negative, choose the opposite instruction.  */
	if (value < 0)
	  {
	    value = -value;
	    subtract = !subtract;
	    /* Negating the most-negative value overflows and stays
	       negative; report it rather than encoding garbage.  */
	    if (value < 0)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	  }

	if (rd == REG_SP)
	  {
	    /* 9-bit SP-adjust form: word-aligned, stored >> 2.  */
	    if (value & ~0x1fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for stack address calculation"));
	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
	    newval |= value >> 2;
	  }
	else if (rs == REG_PC || rs == REG_SP)
	  {
	    /* PR gas/18541.  If the addition is for a defined symbol
	       within range of an ADR instruction then accept it.  */
	    if (subtract
		&& value == 4
		&& fixP->fx_addsy != NULL)
	      {
		subtract = 0;

		if (! S_IS_DEFINED (fixP->fx_addsy)
		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
		    || S_IS_WEAK (fixP->fx_addsy))
		  {
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("address calculation needs a strongly defined nearby symbol"));
		  }
		else
		  {
		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;

		    /* Round up to the next 4-byte boundary.  */
		    if (v & 3)
		      v = (v + 3) & ~ 3;
		    else
		      v += 4;
		    v = S_GET_VALUE (fixP->fx_addsy) - v;

		    if (v & ~0x3fc)
		      {
			as_bad_where (fixP->fx_file, fixP->fx_line,
				      _("symbol too far away"));
		      }
		    else
		      {
			fixP->fx_done = 1;
			value = v;
		      }
		  }
	      }

	    /* 10-bit ADR form: ADD only, word-aligned, stored >> 2.  */
	    if (subtract || value & ~0x3fc)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("invalid immediate for address calculation (value = 0x%08lX)"),
			    (unsigned long) (subtract ? - value : value));
	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
	    newval |= rd << 8;
	    newval |= value >> 2;
	  }
	else if (rs == rd)
	  {
	    /* 8-bit immediate form.  */
	    if (value & ~0xff)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
	    newval |= (rd << 8) | value;
	  }
	else
	  {
	    /* 3-bit immediate form.  */
	    if (value & ~0x7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("immediate value out of range"));
	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
	    newval |= rd | (rs << 3) | (value << 6);
	  }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
23310
    case BFD_RELOC_ARM_THUMB_IMM:
      /* Plain 8-bit immediate in the low byte.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate: %ld is out of range"),
		      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
	value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      /* GC-related relocations; keep them for the linker.  */
      fixP->fx_done = 0;
      return;
23342
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* REL format relocations are limited to a 16-bit addend.  */
	  if (!fixP->fx_done)
	    {
	      if (value < -0x8000 || value > 0x7fff)
		  as_bad_where (fixP->fx_file, fixP->fx_line,
				_("offset out of range"));
	    }
	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* MOVT takes the high halfword of the resolved value.  */
	      value >>= 16;
	    }

	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      /* Thumb encoding: imm4:i:imm3:imm8 scattered over the
		 two halfwords.  */
	      newval = get_thumb32_insn (buf);
	      newval &= 0xfbf08f00;
	      newval |= (value & 0xf000) << 4;
	      newval |= (value & 0x0800) << 15;
	      newval |= (value & 0x0700) << 4;
	      newval |= (value & 0x00ff);
	      put_thumb32_insn (buf, newval);
	    }
	  else
	    {
	      /* ARM encoding: imm4 in bits 16-19, imm12 in bits 0-11.  */
	      newval = md_chars_to_number (buf, 4);
	      newval &= 0xfff0f000;
	      newval |= value & 0x0fff;
	      newval |= (value & 0xf000) << 4;
	      md_number_to_chars (buf, newval, 4);
	    }
	}
      return;

    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      gas_assert (!fixP->fx_done);
      {
	bfd_vma insn;
	bfd_boolean is_mov;
	bfd_vma encoded_addend = value;

	/* Check that addend can be encoded in instruction.  */
	if (!seg->use_rela_p && (value < 0 || value > 255))
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("the offset 0x%08lX is not representable"),
			(unsigned long) encoded_addend);

	/* Extract the instruction.  */
	insn = md_chars_to_number (buf, THUMB_SIZE);
	is_mov = (insn & 0xf800) == 0x2000;

	/* Encode insn.  */
	if (is_mov)
	  {
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }
	else
	  {
	    int rd, rs;

	    /* Extract the instruction.  */
	    /* Encoding is the following
	       0x8000  SUB
	       0x00F0  Rd
	       0x000F  Rs
	    */
	    /* The following conditions must be true :
	       - ADD
	       - Rd == Rs
	       - Rd <= 7
	    */
	    rd = (insn >> 4) & 0xf;
	    rs = insn & 0xf;
	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("Unable to process relocation for thumb opcode: %lx"),
			    (unsigned long) insn);

	    /* Encode as ADD immediate8 thumb 1 code.  */
	    insn = 0x3000 | (rd << 8);

	    /* Place the encoded addend into the first 8 bits of the
	       instruction.  */
	    if (!seg->use_rela_p)
	      insn |= encoded_addend;
	  }

	/* Update the instruction.  */
	md_number_to_chars (buf, insn, THUMB_SIZE);
      }
      break;
23445
23446 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23447 case BFD_RELOC_ARM_ALU_PC_G0:
23448 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23449 case BFD_RELOC_ARM_ALU_PC_G1:
23450 case BFD_RELOC_ARM_ALU_PC_G2:
23451 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23452 case BFD_RELOC_ARM_ALU_SB_G0:
23453 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23454 case BFD_RELOC_ARM_ALU_SB_G1:
23455 case BFD_RELOC_ARM_ALU_SB_G2:
23456 gas_assert (!fixP->fx_done);
23457 if (!seg->use_rela_p)
23458 {
23459 bfd_vma insn;
23460 bfd_vma encoded_addend;
23461 bfd_vma addend_abs = abs (value);
23462
23463 /* Check that the absolute value of the addend can be
23464 expressed as an 8-bit constant plus a rotation. */
23465 encoded_addend = encode_arm_immediate (addend_abs);
23466 if (encoded_addend == (unsigned int) FAIL)
23467 as_bad_where (fixP->fx_file, fixP->fx_line,
23468 _("the offset 0x%08lX is not representable"),
23469 (unsigned long) addend_abs);
23470
23471 /* Extract the instruction. */
23472 insn = md_chars_to_number (buf, INSN_SIZE);
23473
23474 /* If the addend is positive, use an ADD instruction.
23475 Otherwise use a SUB. Take care not to destroy the S bit. */
23476 insn &= 0xff1fffff;
23477 if (value < 0)
23478 insn |= 1 << 22;
23479 else
23480 insn |= 1 << 23;
23481
23482 /* Place the encoded addend into the first 12 bits of the
23483 instruction. */
23484 insn &= 0xfffff000;
23485 insn |= encoded_addend;
23486
23487 /* Update the instruction. */
23488 md_number_to_chars (buf, insn, INSN_SIZE);
23489 }
23490 break;
23491
23492 case BFD_RELOC_ARM_LDR_PC_G0:
23493 case BFD_RELOC_ARM_LDR_PC_G1:
23494 case BFD_RELOC_ARM_LDR_PC_G2:
23495 case BFD_RELOC_ARM_LDR_SB_G0:
23496 case BFD_RELOC_ARM_LDR_SB_G1:
23497 case BFD_RELOC_ARM_LDR_SB_G2:
23498 gas_assert (!fixP->fx_done);
23499 if (!seg->use_rela_p)
23500 {
23501 bfd_vma insn;
23502 bfd_vma addend_abs = abs (value);
23503
23504 /* Check that the absolute value of the addend can be
23505 encoded in 12 bits. */
23506 if (addend_abs >= 0x1000)
23507 as_bad_where (fixP->fx_file, fixP->fx_line,
23508 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23509 (unsigned long) addend_abs);
23510
23511 /* Extract the instruction. */
23512 insn = md_chars_to_number (buf, INSN_SIZE);
23513
23514 /* If the addend is negative, clear bit 23 of the instruction.
23515 Otherwise set it. */
23516 if (value < 0)
23517 insn &= ~(1 << 23);
23518 else
23519 insn |= 1 << 23;
23520
23521 /* Place the absolute value of the addend into the first 12 bits
23522 of the instruction. */
23523 insn &= 0xfffff000;
23524 insn |= addend_abs;
23525
23526 /* Update the instruction. */
23527 md_number_to_chars (buf, insn, INSN_SIZE);
23528 }
23529 break;
23530
23531 case BFD_RELOC_ARM_LDRS_PC_G0:
23532 case BFD_RELOC_ARM_LDRS_PC_G1:
23533 case BFD_RELOC_ARM_LDRS_PC_G2:
23534 case BFD_RELOC_ARM_LDRS_SB_G0:
23535 case BFD_RELOC_ARM_LDRS_SB_G1:
23536 case BFD_RELOC_ARM_LDRS_SB_G2:
23537 gas_assert (!fixP->fx_done);
23538 if (!seg->use_rela_p)
23539 {
23540 bfd_vma insn;
23541 bfd_vma addend_abs = abs (value);
23542
23543 /* Check that the absolute value of the addend can be
23544 encoded in 8 bits. */
23545 if (addend_abs >= 0x100)
23546 as_bad_where (fixP->fx_file, fixP->fx_line,
23547 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23548 (unsigned long) addend_abs);
23549
23550 /* Extract the instruction. */
23551 insn = md_chars_to_number (buf, INSN_SIZE);
23552
23553 /* If the addend is negative, clear bit 23 of the instruction.
23554 Otherwise set it. */
23555 if (value < 0)
23556 insn &= ~(1 << 23);
23557 else
23558 insn |= 1 << 23;
23559
23560 /* Place the first four bits of the absolute value of the addend
23561 into the first 4 bits of the instruction, and the remaining
23562 four into bits 8 .. 11. */
23563 insn &= 0xfffff0f0;
23564 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23565
23566 /* Update the instruction. */
23567 md_number_to_chars (buf, insn, INSN_SIZE);
23568 }
23569 break;
23570
23571 case BFD_RELOC_ARM_LDC_PC_G0:
23572 case BFD_RELOC_ARM_LDC_PC_G1:
23573 case BFD_RELOC_ARM_LDC_PC_G2:
23574 case BFD_RELOC_ARM_LDC_SB_G0:
23575 case BFD_RELOC_ARM_LDC_SB_G1:
23576 case BFD_RELOC_ARM_LDC_SB_G2:
23577 gas_assert (!fixP->fx_done);
23578 if (!seg->use_rela_p)
23579 {
23580 bfd_vma insn;
23581 bfd_vma addend_abs = abs (value);
23582
23583 /* Check that the absolute value of the addend is a multiple of
23584 four and, when divided by four, fits in 8 bits. */
23585 if (addend_abs & 0x3)
23586 as_bad_where (fixP->fx_file, fixP->fx_line,
23587 _("bad offset 0x%08lX (must be word-aligned)"),
23588 (unsigned long) addend_abs);
23589
23590 if ((addend_abs >> 2) > 0xff)
23591 as_bad_where (fixP->fx_file, fixP->fx_line,
23592 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23593 (unsigned long) addend_abs);
23594
23595 /* Extract the instruction. */
23596 insn = md_chars_to_number (buf, INSN_SIZE);
23597
23598 /* If the addend is negative, clear bit 23 of the instruction.
23599 Otherwise set it. */
23600 if (value < 0)
23601 insn &= ~(1 << 23);
23602 else
23603 insn |= 1 << 23;
23604
23605 /* Place the addend (divided by four) into the first eight
23606 bits of the instruction. */
23607 insn &= 0xfffffff0;
23608 insn |= addend_abs >> 2;
23609
23610 /* Update the instruction. */
23611 md_number_to_chars (buf, insn, INSN_SIZE);
23612 }
23613 break;
23614
23615 case BFD_RELOC_ARM_V4BX:
23616 /* This will need to go in the object file. */
23617 fixP->fx_done = 0;
23618 break;
23619
23620 case BFD_RELOC_UNUSED:
23621 default:
23622 as_bad_where (fixP->fx_file, fixP->fx_line,
23623 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23624 }
23625 }
23626
23627 /* Translate internal representation of relocation info to BFD target
23628 format. */
23629
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA-style sections remove the PC bias from the explicit
	 addend; for REL-style sections record the reloc's own address
	 as the offset instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the gas-internal fixup type onto a BFD reloc code.  NOTE: the
     data/MOVW/MOVT cases below deliberately fall through to the next
     case when the fixup is not PC-relative, eventually reaching the
     group that passes the type through unchanged.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* For RELA sections this type can be emitted as-is; for REL it
	 should have been resolved earlier, so reaching here is an
	 error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Everything else is internal-only and cannot appear in the
	   object file; report it with a readable name.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* References to _GLOBAL_OFFSET_TABLE_ become GOTPC relocations.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23889
23890 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23891
23892 void
23893 cons_fix_new_arm (fragS * frag,
23894 int where,
23895 int size,
23896 expressionS * exp,
23897 bfd_reloc_code_real_type reloc)
23898 {
23899 int pcrel = 0;
23900
23901 /* Pick a reloc.
23902 FIXME: @@ Should look at CPU word size. */
23903 switch (size)
23904 {
23905 case 1:
23906 reloc = BFD_RELOC_8;
23907 break;
23908 case 2:
23909 reloc = BFD_RELOC_16;
23910 break;
23911 case 4:
23912 default:
23913 reloc = BFD_RELOC_32;
23914 break;
23915 case 8:
23916 reloc = BFD_RELOC_64;
23917 break;
23918 }
23919
23920 #ifdef TE_PE
23921 if (exp->X_op == O_secrel)
23922 {
23923 exp->X_op = O_symbol;
23924 reloc = BFD_RELOC_32_SECREL;
23925 }
23926 #endif
23927
23928 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23929 }
23930
23931 #if defined (OBJ_COFF)
23932 void
23933 arm_validate_fix (fixS * fixP)
23934 {
23935 /* If the destination of the branch is a defined symbol which does not have
23936 the THUMB_FUNC attribute, then we must be calling a function which has
23937 the (interfacearm) attribute. We look for the Thumb entry point to that
23938 function and change the branch to refer to that function instead. */
23939 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23940 && fixP->fx_addsy != NULL
23941 && S_IS_DEFINED (fixP->fx_addsy)
23942 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23943 {
23944 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23945 }
23946 }
23947 #endif
23948
23949
23950 int
23951 arm_force_relocation (struct fix * fixp)
23952 {
23953 #if defined (OBJ_COFF) && defined (TE_PE)
23954 if (fixp->fx_r_type == BFD_RELOC_RVA)
23955 return 1;
23956 #endif
23957
23958 /* In case we have a call or a branch to a function in ARM ISA mode from
23959 a thumb function or vice-versa force the relocation. These relocations
23960 are cleared off for some cores that might have blx and simple transformations
23961 are possible. */
23962
23963 #ifdef OBJ_ELF
23964 switch (fixp->fx_r_type)
23965 {
23966 case BFD_RELOC_ARM_PCREL_JUMP:
23967 case BFD_RELOC_ARM_PCREL_CALL:
23968 case BFD_RELOC_THUMB_PCREL_BLX:
23969 if (THUMB_IS_FUNC (fixp->fx_addsy))
23970 return 1;
23971 break;
23972
23973 case BFD_RELOC_ARM_PCREL_BLX:
23974 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23975 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23976 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23977 if (ARM_IS_FUNC (fixp->fx_addsy))
23978 return 1;
23979 break;
23980
23981 default:
23982 break;
23983 }
23984 #endif
23985
23986 /* Resolve these relocations even if the symbol is extern or weak.
23987 Technically this is probably wrong due to symbol preemption.
23988 In practice these relocations do not have enough range to be useful
23989 at dynamic link time, and some code (e.g. in the Linux kernel)
23990 expects these references to be resolved. */
23991 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23992 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23993 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23994 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23995 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23996 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23997 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23998 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23999 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24000 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24001 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24002 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24003 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24004 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24005 return 0;
24006
24007 /* Always leave these relocations for the linker. */
24008 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24009 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24010 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24011 return 1;
24012
24013 /* Always generate relocations against function symbols. */
24014 if (fixp->fx_r_type == BFD_RELOC_32
24015 && fixp->fx_addsy
24016 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24017 return 1;
24018
24019 return generic_force_reloc (fixp);
24020 }
24021
24022 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24023 /* Relocations against function names must be left unadjusted,
24024 so that the linker can use this information to generate interworking
24025 stubs. The MIPS version of this function
24026 also prevents relocations that are mips-16 specific, but I do not
24027 know why it does this.
24028
24029 FIXME:
24030 There is one other problem that ought to be addressed here, but
24031 which currently is not: Taking the address of a label (rather
24032 than a function) and then later jumping to that address. Such
24033 addresses also ought to have their bottom bit set (assuming that
24034 they reside in Thumb code), but at the moment they will not. */
24035
24036 bfd_boolean
24037 arm_fix_adjustable (fixS * fixP)
24038 {
24039 if (fixP->fx_addsy == NULL)
24040 return 1;
24041
24042 /* Preserve relocations against symbols with function type. */
24043 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24044 return FALSE;
24045
24046 if (THUMB_IS_FUNC (fixP->fx_addsy)
24047 && fixP->fx_subsy == NULL)
24048 return FALSE;
24049
24050 /* We need the symbol name for the VTABLE entries. */
24051 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24052 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24053 return FALSE;
24054
24055 /* Don't allow symbols to be discarded on GOT related relocs. */
24056 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24057 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24058 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24059 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24060 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24061 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24062 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24063 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24064 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24065 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24066 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24067 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24068 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24069 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24070 return FALSE;
24071
24072 /* Similarly for group relocations. */
24073 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24074 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24075 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24076 return FALSE;
24077
24078 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24079 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24080 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24081 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24082 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24083 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24084 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24085 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24086 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24087 return FALSE;
24088
24089 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24090 offsets, so keep these symbols. */
24091 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24092 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24093 return FALSE;
24094
24095 return TRUE;
24096 }
24097 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24098
24099 #ifdef OBJ_ELF
24100 const char *
24101 elf32_arm_target_format (void)
24102 {
24103 #ifdef TE_SYMBIAN
24104 return (target_big_endian
24105 ? "elf32-bigarm-symbian"
24106 : "elf32-littlearm-symbian");
24107 #elif defined (TE_VXWORKS)
24108 return (target_big_endian
24109 ? "elf32-bigarm-vxworks"
24110 : "elf32-littlearm-vxworks");
24111 #elif defined (TE_NACL)
24112 return (target_big_endian
24113 ? "elf32-bigarm-nacl"
24114 : "elf32-littlearm-nacl");
24115 #else
24116 if (target_big_endian)
24117 return "elf32-bigarm";
24118 else
24119 return "elf32-littlearm";
24120 #endif
24121 }
24122
/* Per-symbol hook run while the symbol table is written out; simply
   defers to the generic ELF symbol frobbing code.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24129 #endif
24130
24131 /* MD interface: Finalization. */
24132
24133 void
24134 arm_cleanup (void)
24135 {
24136 literal_pool * pool;
24137
24138 /* Ensure that all the IT blocks are properly closed. */
24139 check_it_blocks_finished ();
24140
24141 for (pool = list_of_pools; pool; pool = pool->next)
24142 {
24143 /* Put it at the end of the relevant section. */
24144 subseg_set (pool->section, pool->sub_section);
24145 #ifdef OBJ_ELF
24146 arm_elf_change_section ();
24147 #endif
24148 s_ltorg (0);
24149 }
24150 }
24151
24152 #ifdef OBJ_ELF
24153 /* Remove any excess mapping symbols generated for alignment frags in
24154 SEC. We may have created a mapping symbol before a zero byte
24155 alignment; remove it if there's a mapping symbol after the
24156 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded for this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over zero-size frags to decide whether SYM is
	 shadowed by a later mapping symbol and can be removed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24217 #endif
24218
24219 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24220 ARM ones. */
24221
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF, Thumb-ness is encoded in the symbol's storage class;
     rewrite each Thumb symbol's class to the corresponding C_THUMB*
     value.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* For ELF, tag Thumb symbols via st_target_internal / st_info,
     leaving the special $a/$t/$d mapping symbols alone.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24300
24301 /* MD interface: Initialization. */
24302
24303 static void
24304 set_constant_flonums (void)
24305 {
24306 int i;
24307
24308 for (i = 0; i < NUM_FLOAT_VALS; i++)
24309 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24310 abort ();
24311 }
24312
24313 /* Auto-select Thumb mode if it's the only available instruction set for the
24314 given architecture. */
24315
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* arm_ext_v1 is the base ARM instruction set; if the selected CPU
     lacks it, Thumb is the only instruction set available, so switch
     the assembler into 16-bit (Thumb) mode.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
24322
/* MD interface: called once at assembler start-up.  Builds the keyword
   hash tables (opcodes, condition codes, shifts, PSR names, registers,
   relocation names, barrier options), pre-parses the FP constants,
   resolves the CPU/FPU selection from the command-line options, and
   records the resulting flags and machine type in the output BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (   (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the lookup tables from the static keyword arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU defaults when nothing above selected one.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific extension first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24549
24550 /* Command line processing. */
24551
24552 /* md_parse_option
24553 Invocation line includes a switch not recognized by the base assembler.
24554 See if it's a processor-specific option.
24555
24556 This routine is somewhat complicated by the need for backwards
24557 compatibility (since older releases of gcc can't be changed).
24558 The new options try to make the interface as compatible as
24559 possible with GCC.
24560
24561 New options (supported) are:
24562
24563 -mcpu=<cpu name> Assemble for selected processor
24564 -march=<architecture name> Assemble for selected architecture
24565 -mfpu=<fpu architecture> Assemble for selected FPU.
24566 -EB/-mbig-endian Big-endian
24567 -EL/-mlittle-endian Little-endian
24568 -k Generate PIC code
24569 -mthumb Start in Thumb mode
24570 -mthumb-interwork Code supports ARM/Thumb interworking
24571
24572 -m[no-]warn-deprecated Warn about deprecated features
24573 -m[no-]warn-syms Warn when symbols match instructions
24574
24575 For now we will also provide support for:
24576
24577 -mapcs-32 32-bit Program counter
24578 -mapcs-26 26-bit Program counter
   -mapcs-float		      Floats passed in FP registers
24580 -mapcs-reentrant Reentrant code
24581 -matpcs
   (sometimes these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)
24584
   The remaining options are only supported for backwards compatibility.
24586 Cpu variants, the arm part is optional:
24587 -m[arm]1 Currently not supported.
24588 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24589 -m[arm]3 Arm 3 processor
24590 -m[arm]6[xx], Arm 6 processors
24591 -m[arm]7[xx][t][[d]m] Arm 7 processors
24592 -m[arm]8[10] Arm 8 processors
24593 -m[arm]9[20][tdmi] Arm 9 processors
24594 -mstrongarm[110[0]] StrongARM processors
24595 -mxscale XScale processors
24596 -m[arm]v[2345[t[e]]] Arm architectures
24597 -mall All (except the ARM1)
24598 FP variants:
24599 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24600 -mfpe-old (No float load/store multiples)
24601 -mvfpxd VFP Single precision
24602 -mvfp All VFP
24603 -mno-fpu Disable all floating point instructions
24604
24605 The following CPU names are recognized:
24606 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24607 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24608 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24609 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24610 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24611 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24612 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24613
24614 */
24615
/* Single-character options recognized by gas for this target:
   -m<arg> (machine variant, parsed in md_parse_option) and
   -k (generate PIC code).  */
const char * md_shortopts = "m:k";

/* Long-option codes.  -EB/-EL are only meaningful when the selected
   endianness can actually be produced by this build of the assembler.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* --fix-v4bx: convert ARMv4 BX instructions for linkers that rewrite them.  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24629
/* Long options handled directly by md_parse_option (as opposed to the
   arm_opts/arm_long_opts tables below, which are scanned afterwards).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
24644
/* A simple boolean-style option: matching OPTION stores VALUE into *VAR.
   Used for the arm_opts[] table below.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24653
24654 struct arm_option_table arm_opts[] =
24655 {
24656 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24657 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24658 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24659 &support_interwork, 1, NULL},
24660 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24661 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24662 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24663 1, NULL},
24664 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24665 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24666 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24667 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24668 NULL},
24669
24670 /* These are recognized by the assembler, but have no affect on code. */
24671 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24672 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24673
24674 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24675 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24676 &warn_on_deprecated, 0, NULL},
24677 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24678 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24679 {NULL, NULL, NULL, 0, NULL}
24680 };
24681
/* A deprecated CPU/FPU selection option: matching OPTION stores a pointer
   to VALUE into *VAR and prints the DEPRECATED message suggesting the
   modern -mcpu=/-march=/-mfpu= replacement.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
24689
/* Deprecated spellings of CPU, architecture and FPU selection options,
   kept only for backwards compatibility with old GCC releases.  Each
   entry redirects to the equivalent modern -mcpu=/-march=/-mfpu= form.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24802
/* Entry in the -mcpu= lookup table: a CPU name, the architecture feature
   set it implies, and the FPU assumed when -mfpu= is not given.  */
struct arm_cpu_option_table
{
  char *name;
  size_t name_len;	/* Length of NAME (precomputed via ARM_CPU_OPT).  */
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24815
/* The -mcpu= lookup table.  This list should, at a minimum, contain all
   the cpu names recognized by GCC.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
24983
/* Entry in the -march= lookup table: an architecture name, its feature
   set, and the FPU assumed when -mfpu= is not given.  */
struct arm_arch_option_table
{
  char *name;
  size_t name_len;	/* Length of NAME (precomputed via ARM_ARCH_OPT).  */
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};
24991
/* The -march= lookup table.  This list should, at a minimum, contain all
   the architecture names recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25053
/* ISA extensions in the co-processor and main instruction set space.
   MERGE_VALUE is OR'ed in when "+ext" is given, CLEAR_VALUE is removed
   for "+noext", and ALLOWED_ARCHS restricts which base architectures may
   take the extension.  */
struct arm_option_extension_value_table
{
  char *name;
  size_t name_len;	/* Length of NAME (precomputed via ARM_EXT_OPT).  */
  const arm_feature_set merge_value;
  const arm_feature_set clear_value;
  const arm_feature_set allowed_archs;
};
25063
25064 /* The following table must be in alphabetical order with a NULL last entry.
25065 */
25066 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25067 static const struct arm_option_extension_value_table arm_extensions[] =
25068 {
25069 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25071 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25072 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
25073 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25074 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
25075 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25076 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25077 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25078 ARM_ARCH_V8_2A),
25079 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25080 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25081 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
25082 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
25083 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
25084 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
25085 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
25086 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
25087 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
25088 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25089 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25090 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
25091 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
25092 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
25093 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25094 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25095 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25096 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
25097 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
25098 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
25099 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25100 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25101 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25102 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
25103 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
25104 | ARM_EXT_DIV),
25105 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
25106 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25107 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8,
25108 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
25109 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25110 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
25111 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
25112 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
25113 };
25114 #undef ARM_EXT_OPT
25115
/* ISA floating-point and Advanced SIMD extensions: maps an -mfpu= name
   to the FPU feature set it selects.  */
struct arm_option_fpu_value_table
{
  char *name;
  const arm_feature_set value;
};
25122
/* The -mfpu= lookup table.  This list should, at a minimum, contain all
   the fpu names recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
25172
/* Generic (name, integer value) pair used for simple option tables
   such as arm_float_abis and arm_eabis below.  */
struct arm_option_value_table
{
  char *name;	/* Option name.  */
  long value;	/* Value selected by this name.  */
};
25178
/* Floating-point ABIs accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25186
25187 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
25196 #endif
25197
/* Entry for the table of long-form command line options; matched by
   leading substring in md_parse_option, with the remainder of the
   argument handed to FUNC.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
25205
/* Parse STR, a '+'-separated list of architectural extensions (e.g.
   "+crc+nofp"), and apply each one to the feature set *OPT_P.  On
   entry *OPT_P is the base feature set; it is replaced by a freshly
   xmalloc'd modified copy.  Returns FALSE (after reporting via as_bad)
   on any parse error.  */
static bfd_boolean
arm_parse_extension (char *str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = (arm_feature_set *)
    xmalloc (sizeof (arm_feature_set));

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      char *ext;
      size_t len;

      /* Each component must begin with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix requests removal of the extension; switching to
	 removal mode restarts the table scan from the beginning.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    /* Check we can apply the extension to this architecture.  */
	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      /* Advance to the next "+ext" component; NULL ends the loop.  */
      str = ext;
    };

  return TRUE;
}
25328
25329 static bfd_boolean
25330 arm_parse_cpu (char *str)
25331 {
25332 const struct arm_cpu_option_table *opt;
25333 char *ext = strchr (str, '+');
25334 size_t len;
25335
25336 if (ext != NULL)
25337 len = ext - str;
25338 else
25339 len = strlen (str);
25340
25341 if (len == 0)
25342 {
25343 as_bad (_("missing cpu name `%s'"), str);
25344 return FALSE;
25345 }
25346
25347 for (opt = arm_cpus; opt->name != NULL; opt++)
25348 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25349 {
25350 mcpu_cpu_opt = &opt->value;
25351 mcpu_fpu_opt = &opt->default_fpu;
25352 if (opt->canonical_name)
25353 {
25354 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25355 strcpy (selected_cpu_name, opt->canonical_name);
25356 }
25357 else
25358 {
25359 size_t i;
25360
25361 if (len >= sizeof selected_cpu_name)
25362 len = (sizeof selected_cpu_name) - 1;
25363
25364 for (i = 0; i < len; i++)
25365 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25366 selected_cpu_name[i] = 0;
25367 }
25368
25369 if (ext != NULL)
25370 return arm_parse_extension (ext, &mcpu_cpu_opt);
25371
25372 return TRUE;
25373 }
25374
25375 as_bad (_("unknown cpu `%s'"), str);
25376 return FALSE;
25377 }
25378
25379 static bfd_boolean
25380 arm_parse_arch (char *str)
25381 {
25382 const struct arm_arch_option_table *opt;
25383 char *ext = strchr (str, '+');
25384 size_t len;
25385
25386 if (ext != NULL)
25387 len = ext - str;
25388 else
25389 len = strlen (str);
25390
25391 if (len == 0)
25392 {
25393 as_bad (_("missing architecture name `%s'"), str);
25394 return FALSE;
25395 }
25396
25397 for (opt = arm_archs; opt->name != NULL; opt++)
25398 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25399 {
25400 march_cpu_opt = &opt->value;
25401 march_fpu_opt = &opt->default_fpu;
25402 strcpy (selected_cpu_name, opt->name);
25403
25404 if (ext != NULL)
25405 return arm_parse_extension (ext, &march_cpu_opt);
25406
25407 return TRUE;
25408 }
25409
25410 as_bad (_("unknown architecture `%s'\n"), str);
25411 return FALSE;
25412 }
25413
25414 static bfd_boolean
25415 arm_parse_fpu (char * str)
25416 {
25417 const struct arm_option_fpu_value_table * opt;
25418
25419 for (opt = arm_fpus; opt->name != NULL; opt++)
25420 if (streq (opt->name, str))
25421 {
25422 mfpu_opt = &opt->value;
25423 return TRUE;
25424 }
25425
25426 as_bad (_("unknown floating point format `%s'\n"), str);
25427 return FALSE;
25428 }
25429
25430 static bfd_boolean
25431 arm_parse_float_abi (char * str)
25432 {
25433 const struct arm_option_value_table * opt;
25434
25435 for (opt = arm_float_abis; opt->name != NULL; opt++)
25436 if (streq (opt->name, str))
25437 {
25438 mfloat_abi_opt = opt->value;
25439 return TRUE;
25440 }
25441
25442 as_bad (_("unknown floating point abi `%s'\n"), str);
25443 return FALSE;
25444 }
25445
25446 #ifdef OBJ_ELF
25447 static bfd_boolean
25448 arm_parse_eabi (char * str)
25449 {
25450 const struct arm_option_value_table *opt;
25451
25452 for (opt = arm_eabis; opt->name != NULL; opt++)
25453 if (streq (opt->name, str))
25454 {
25455 meabi_flags = opt->value;
25456 return TRUE;
25457 }
25458 as_bad (_("unknown EABI `%s'\n"), str);
25459 return FALSE;
25460 }
25461 #endif
25462
25463 static bfd_boolean
25464 arm_parse_it_mode (char * str)
25465 {
25466 bfd_boolean ret = TRUE;
25467
25468 if (streq ("arm", str))
25469 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25470 else if (streq ("thumb", str))
25471 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25472 else if (streq ("always", str))
25473 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25474 else if (streq ("never", str))
25475 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25476 else
25477 {
25478 as_bad (_("unknown implicit IT mode `%s', should be "\
25479 "arm, thumb, always, or never."), str);
25480 ret = FALSE;
25481 }
25482
25483 return ret;
25484 }
25485
25486 static bfd_boolean
25487 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
25488 {
25489 codecomposer_syntax = TRUE;
25490 arm_comment_chars[0] = ';';
25491 arm_line_separator_chars[0] = 0;
25492 return TRUE;
25493 }
25494
/* Long-form command line options; matched by leading substring in
   md_parse_option, which passes the remainder of the argument to the
   per-option parse function.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25515
/* Handle command line option C (with argument ARG, which may be NULL).
   Returns non-zero if the option was recognized.  Options without an
   explicit case are looked up in the arm_opts, arm_legacy_opts and
   arm_long_opts tables in turn.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: the whole of "-<c><arg>" must match the
	 table entry exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options, kept for backwards compatibility.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The -1 accounts for ARG
		 not containing the leading character already matched
		 as C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25606
25607 void
25608 md_show_usage (FILE * fp)
25609 {
25610 struct arm_option_table *opt;
25611 struct arm_long_option_table *lopt;
25612
25613 fprintf (fp, _(" ARM-specific assembler options:\n"));
25614
25615 for (opt = arm_opts; opt->option != NULL; opt++)
25616 if (opt->help != NULL)
25617 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
25618
25619 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25620 if (lopt->help != NULL)
25621 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
25622
25623 #ifdef OPTION_EB
25624 fprintf (fp, _("\
25625 -EB assemble code for a big-endian cpu\n"));
25626 #endif
25627
25628 #ifdef OPTION_EL
25629 fprintf (fp, _("\
25630 -EL assemble code for a little-endian cpu\n"));
25631 #endif
25632
25633 fprintf (fp, _("\
25634 --fix-v4bx Allow BX in ARMv4 code\n"));
25635 }
25636
25637
25638 #ifdef OBJ_ELF
/* Associates a set of architecture feature bits with the corresponding
   EABI Tag_CPU_arch attribute value.  */
typedef struct
{
  int val;		/* Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
25644
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  Entry order
   is significant: aeabi_set_public_attributes records the LAST entry that
   contributes a new feature, so do not re-sort this table.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {0, ARM_ARCH_NONE}
};
25670
25671 /* Set an attribute if it has not already been set by the user. */
25672 static void
25673 aeabi_set_attribute_int (int tag, int value)
25674 {
25675 if (tag < 1
25676 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25677 || !attributes_set_explicitly[tag])
25678 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25679 }
25680
25681 static void
25682 aeabi_set_attribute_string (int tag, const char *value)
25683 {
25684 if (tag < 1
25685 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25686 || !attributes_set_explicitly[tag])
25687 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25688 }
25689
/* Set the public EABI object attributes (the Tag_* entries of the
   .ARM.attributes section), derived from the features of the selected
   CPU/FPU and from the instructions actually assembled.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk cpu_arch_ver recording the last entry that contributes a
     feature not yet accounted for (see comment below).  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* Features beyond the v8-M baseline promote the tag to v8-M
     mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Report "armv..." names upper-cased without the "armv" prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25913
25914 /* Add the default contents for the .ARM.attributes section. */
25915 void
25916 arm_md_end (void)
25917 {
25918 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25919 return;
25920
25921 aeabi_set_public_attributes ();
25922 }
25923 #endif /* OBJ_ELF */
25924
25925
25926 /* Parse a .cpu directive. */
25927
25928 static void
25929 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25930 {
25931 const struct arm_cpu_option_table *opt;
25932 char *name;
25933 char saved_char;
25934
25935 name = input_line_pointer;
25936 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25937 input_line_pointer++;
25938 saved_char = *input_line_pointer;
25939 *input_line_pointer = 0;
25940
25941 /* Skip the first "all" entry. */
25942 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25943 if (streq (opt->name, name))
25944 {
25945 mcpu_cpu_opt = &opt->value;
25946 selected_cpu = opt->value;
25947 if (opt->canonical_name)
25948 strcpy (selected_cpu_name, opt->canonical_name);
25949 else
25950 {
25951 int i;
25952 for (i = 0; opt->name[i]; i++)
25953 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25954
25955 selected_cpu_name[i] = 0;
25956 }
25957 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25958 *input_line_pointer = saved_char;
25959 demand_empty_rest_of_line ();
25960 return;
25961 }
25962 as_bad (_("unknown cpu `%s'"), name);
25963 *input_line_pointer = saved_char;
25964 ignore_rest_of_line ();
25965 }
25966
25967
25968 /* Parse a .arch directive. */
25969
25970 static void
25971 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25972 {
25973 const struct arm_arch_option_table *opt;
25974 char saved_char;
25975 char *name;
25976
25977 name = input_line_pointer;
25978 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25979 input_line_pointer++;
25980 saved_char = *input_line_pointer;
25981 *input_line_pointer = 0;
25982
25983 /* Skip the first "all" entry. */
25984 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25985 if (streq (opt->name, name))
25986 {
25987 mcpu_cpu_opt = &opt->value;
25988 selected_cpu = opt->value;
25989 strcpy (selected_cpu_name, opt->name);
25990 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25991 *input_line_pointer = saved_char;
25992 demand_empty_rest_of_line ();
25993 return;
25994 }
25995
25996 as_bad (_("unknown architecture `%s'\n"), name);
25997 *input_line_pointer = saved_char;
25998 ignore_rest_of_line ();
25999 }
26000
26001
26002 /* Parse a .object_arch directive. */
26003
26004 static void
26005 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26006 {
26007 const struct arm_arch_option_table *opt;
26008 char saved_char;
26009 char *name;
26010
26011 name = input_line_pointer;
26012 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26013 input_line_pointer++;
26014 saved_char = *input_line_pointer;
26015 *input_line_pointer = 0;
26016
26017 /* Skip the first "all" entry. */
26018 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26019 if (streq (opt->name, name))
26020 {
26021 object_arch = &opt->value;
26022 *input_line_pointer = saved_char;
26023 demand_empty_rest_of_line ();
26024 return;
26025 }
26026
26027 as_bad (_("unknown architecture `%s'\n"), name);
26028 *input_line_pointer = saved_char;
26029 ignore_rest_of_line ();
26030 }
26031
26032 /* Parse a .arch_extension directive. */
26033
26034 static void
26035 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26036 {
26037 const struct arm_option_extension_value_table *opt;
26038 char saved_char;
26039 char *name;
26040 int adding_value = 1;
26041
26042 name = input_line_pointer;
26043 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26044 input_line_pointer++;
26045 saved_char = *input_line_pointer;
26046 *input_line_pointer = 0;
26047
26048 if (strlen (name) >= 2
26049 && strncmp (name, "no", 2) == 0)
26050 {
26051 adding_value = 0;
26052 name += 2;
26053 }
26054
26055 for (opt = arm_extensions; opt->name != NULL; opt++)
26056 if (streq (opt->name, name))
26057 {
26058 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
26059 {
26060 as_bad (_("architectural extension `%s' is not allowed for the "
26061 "current base architecture"), name);
26062 break;
26063 }
26064
26065 if (adding_value)
26066 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26067 opt->merge_value);
26068 else
26069 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26070
26071 mcpu_cpu_opt = &selected_cpu;
26072 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26073 *input_line_pointer = saved_char;
26074 demand_empty_rest_of_line ();
26075 return;
26076 }
26077
26078 if (opt->name == NULL)
26079 as_bad (_("unknown architecture extension `%s'\n"), name);
26080
26081 *input_line_pointer = saved_char;
26082 ignore_rest_of_line ();
26083 }
26084
26085 /* Parse a .fpu directive. */
26086
26087 static void
26088 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26089 {
26090 const struct arm_option_fpu_value_table *opt;
26091 char saved_char;
26092 char *name;
26093
26094 name = input_line_pointer;
26095 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26096 input_line_pointer++;
26097 saved_char = *input_line_pointer;
26098 *input_line_pointer = 0;
26099
26100 for (opt = arm_fpus; opt->name != NULL; opt++)
26101 if (streq (opt->name, name))
26102 {
26103 mfpu_opt = &opt->value;
26104 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26105 *input_line_pointer = saved_char;
26106 demand_empty_rest_of_line ();
26107 return;
26108 }
26109
26110 as_bad (_("unknown floating point format `%s'\n"), name);
26111 *input_line_pointer = saved_char;
26112 ignore_rest_of_line ();
26113 }
26114
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific per-symbol flag word from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26122
26123 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used by the
   .eabi_attribute directive to accept Tag_* names as well as raw
   numbers.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search; the table is small and this only runs when a
     directive is parsed.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26197
26198
/* Apply sym value for relocations only in the case that they are for
   local symbols in the same segment as the fixup and you have the
   respective architectural feature for blx and simple switches.
   Returns 1 when the symbol value should be applied to FIXP now,
   0 when it must be left for the final relocation.  */
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	/* Only resolve now when the target symbol's state (ARM vs
	   Thumb, per the *_IS_FUNC flag) matches what the branch
	   encoding expects.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}

    }
  return 0;
}
26234 #endif /* OBJ_ELF */
This page took 0.773088 seconds and 5 git commands to generate.