/* ChangeLog excerpt: gas/config/tc-arm.c (arm_ext_adiv): New variable.
   Source: [deliverable/binutils-gdb.git] / gas / config / tc-arm.c  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
/* This structure holds the unwinding state accumulated between a
   .fnstart directive and the matching .fnend.  There is only ever one
   function being unwound at a time, hence a single static instance.  */

static struct
{
  /* Symbol marking the start of the function (set by .fnstart).  */
  symbolS * proc_start;
  /* Symbol for the unwind table entry, if one has been emitted.  */
  symbolS * table_entry;
  /* Personality routine named by .personality, if any.  */
  symbolS * personality_routine;
  /* Index of a standard personality routine, or -1 if none selected.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
79
80 #endif /* OBJ_ELF */
81
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parse failed in a way that makes trying alternative parses useless
     (e.g. a syntactically committed but semantically invalid operand).  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

/* Floating point ABI variants selectable with -mfloat-abi=.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
97
98 /* Types of processor to assemble for. */
99 #ifndef CPU_DEFAULT
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
103
104 If you have a target that requires a default CPU option then the you
105 should define CPU_DEFAULT here. */
106 #endif
107
108 #ifndef FPU_DEFAULT
109 # ifdef TE_LINUX
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
112 # ifdef OBJ_ELF
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
114 # else
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
117 # endif
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
120 # else
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
123 # endif
124 #endif /* ifndef FPU_DEFAULT */
125
126 #define streq(a, b) (strcmp (a, b) == 0)
127
128 static arm_feature_set cpu_variant;
129 static arm_feature_set arm_arch_used;
130 static arm_feature_set thumb_arch_used;
131
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26 = FALSE;
134 static int atpcs = FALSE;
135 static int support_interwork = FALSE;
136 static int uses_apcs_float = FALSE;
137 static int pic_code = FALSE;
138 static int fix_v4bx = FALSE;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated = TRUE;
141
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_m =
199 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
200 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
201 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
202 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
203 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
204
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
209
210 static const arm_feature_set arm_cext_iwmmxt2 =
211 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
212 static const arm_feature_set arm_cext_iwmmxt =
213 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
214 static const arm_feature_set arm_cext_xscale =
215 ARM_FEATURE (0, ARM_CEXT_XSCALE);
216 static const arm_feature_set arm_cext_maverick =
217 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
218 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
219 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v1xd =
221 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
222 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
223 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
224 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
225 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
226 static const arm_feature_set fpu_vfp_ext_d32 =
227 ARM_FEATURE (0, FPU_VFP_EXT_D32);
228 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
229 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
230 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
231 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
232 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
233 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
234
235 static int mfloat_abi_opt = -1;
236 /* Record user cpu selection for object attributes. */
237 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
238 /* Must be long enough to hold any of the names in arm_cpus. */
239 static char selected_cpu_name[16];
240 #ifdef OBJ_ELF
241 # ifdef EABI_DEFAULT
242 static int meabi_flags = EABI_DEFAULT;
243 # else
244 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
245 # endif
246
247 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
248
249 bfd_boolean
250 arm_is_eabi (void)
251 {
252 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
253 }
254 #endif
255
256 #ifdef OBJ_ELF
257 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
258 symbolS * GOT_symbol;
259 #endif
260
261 /* 0: assemble for ARM,
262 1: assemble for Thumb,
263 2: assemble for Thumb even though target CPU does not support thumb
264 instructions. */
265 static int thumb_mode = 0;
266 /* A value distinct from the possible values for thumb_mode that we
267 can use to record whether thumb_mode has been copied into the
268 tc_frag_data field of a frag. */
269 #define MODE_RECORDED (1 << 4)
270
271 /* Specifies the intrinsic IT insn behavior mode. */
272 enum implicit_it_mode
273 {
274 IMPLICIT_IT_MODE_NEVER = 0x00,
275 IMPLICIT_IT_MODE_ARM = 0x01,
276 IMPLICIT_IT_MODE_THUMB = 0x02,
277 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
278 };
279 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
280
281 /* If unified_syntax is true, we are processing the new unified
282 ARM/Thumb syntax. Important differences from the old ARM mode:
283
284 - Immediate operands do not require a # prefix.
285 - Conditional affixes always appear at the end of the
286 instruction. (For backward compatibility, those instructions
287 that formerly had them in the middle, continue to accept them
288 there.)
289 - The IT instruction may appear, and if it does is validated
290 against subsequent conditional affixes. It does not generate
291 machine code.
292
293 Important differences from the old Thumb mode:
294
295 - Immediate operands do not require a # prefix.
296 - Most of the V6T2 instructions are only available in unified mode.
297 - The .N and .W suffixes are recognized and honored (it is an error
298 if they cannot be honored).
299 - All instructions set the flags if and only if they have an 's' affix.
300 - Conditional affixes may be used. They are validated against
301 preceding IT instructions. Unlike ARM mode, you cannot use a
302 conditional affix except in the scope of an IT instruction. */
303
304 static bfd_boolean unified_syntax = FALSE;
305
306 enum neon_el_type
307 {
308 NT_invtype,
309 NT_untyped,
310 NT_integer,
311 NT_float,
312 NT_poly,
313 NT_signed,
314 NT_unsigned
315 };
316
317 struct neon_type_el
318 {
319 enum neon_el_type type;
320 unsigned size;
321 };
322
323 #define NEON_MAX_TYPE_ELS 4
324
325 struct neon_type
326 {
327 struct neon_type_el el[NEON_MAX_TYPE_ELS];
328 unsigned elems;
329 };
330
331 enum it_instruction_type
332 {
333 OUTSIDE_IT_INSN,
334 INSIDE_IT_INSN,
335 INSIDE_IT_LAST_INSN,
336 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
337 if inside, should be the last one. */
338 NEUTRAL_IT_INSN, /* This could be either inside or outside,
339 i.e. BKPT and NOP. */
340 IT_INSN /* The IT insn has been parsed. */
341 };
342
/* The parsed form of the instruction currently being assembled.  Filled
   in by the operand parsers and consumed by the per-instruction encode
   functions.  */
struct arm_it
{
  /* Error message to report, or NULL if parsing/encoding succeeded.  */
  const char * error;
  /* The binary encoding being built up.  */
  unsigned long instruction;
  /* Size in bytes of the instruction (2 or 4), and any explicit size
     requirement from a .n/.w suffix.  */
  int size;
  int size_req;
  /* Condition code for this instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to emit for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* Position of this instruction relative to any active IT block.  */
  enum it_instruction_type it_insn_type;

  /* One entry per parsed operand (at most 6).  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[6];
};

static struct arm_it inst;
398
399 #define NUM_FLOAT_VALS 8
400
401 const char * fp_const[] =
402 {
403 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
404 };
405
406 /* Number of littlenums required to hold an extended precision number. */
407 #define MAX_LITTLENUMS 6
408
409 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
410
411 #define FAIL (-1)
412 #define SUCCESS (0)
413
414 #define SUFF_S 1
415 #define SUFF_D 2
416 #define SUFF_E 3
417 #define SUFF_P 4
418
419 #define CP_T_X 0x00008000
420 #define CP_T_Y 0x00400000
421
422 #define CONDS_BIT 0x00100000
423 #define LOAD_BIT 0x00100000
424
425 #define DOUBLE_LOAD_FLAG 0x00000001
426
427 struct asm_cond
428 {
429 const char * template_name;
430 unsigned long value;
431 };
432
433 #define COND_ALWAYS 0xE
434
435 struct asm_psr
436 {
437 const char * template_name;
438 unsigned long field;
439 };
440
441 struct asm_barrier_opt
442 {
443 const char * template_name;
444 unsigned long value;
445 };
446
447 /* The bit that distinguishes CPSR and SPSR. */
448 #define SPSR_BIT (1 << 22)
449
450 /* The individual PSR flag bits. */
451 #define PSR_c (1 << 16)
452 #define PSR_x (1 << 17)
453 #define PSR_s (1 << 18)
454 #define PSR_f (1 << 19)
455
456 struct reloc_entry
457 {
458 char * name;
459 bfd_reloc_code_real_type reloc;
460 };
461
462 enum vfp_reg_pos
463 {
464 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
465 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
466 };
467
468 enum vfp_ldstm_type
469 {
470 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
471 };
472
473 /* Bits for DEFINED field in neon_typed_alias. */
474 #define NTA_HASTYPE 1
475 #define NTA_HASINDEX 2
476
477 struct neon_typed_alias
478 {
479 unsigned char defined;
480 unsigned char index;
481 struct neon_type_el eltype;
482 };
483
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.
   NOTE: the order of this enum must match reg_expected_msgs[] below —
   the message for a register type is indexed by its enumerator value.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *              name;
  unsigned char             number;
  unsigned char             type;    /* An enum arm_reg_type value.  */
  unsigned char             builtin; /* Nonzero for built-in (non-alias) names.  */
  struct neon_typed_alias * neon;
};

/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type — keep in sync with the enum above.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
549
550 /* Some well known registers that we refer to directly elsewhere. */
551 #define REG_SP 13
552 #define REG_LR 14
553 #define REG_PC 15
554
555 /* ARM instructions take 4bytes in the object file, Thumb instructions
556 take 2: */
557 #define INSN_SIZE 4
558
559 struct asm_opcode
560 {
561 /* Basic string to match. */
562 const char * template_name;
563
564 /* Parameters to instruction. */
565 unsigned int operands[8];
566
567 /* Conditional tag - see opcode_lookup. */
568 unsigned int tag : 4;
569
570 /* Basic instruction code. */
571 unsigned int avalue : 28;
572
573 /* Thumb-format instruction code. */
574 unsigned int tvalue;
575
576 /* Which architecture variant provides this instruction. */
577 const arm_feature_set * avariant;
578 const arm_feature_set * tvariant;
579
580 /* Function to call to encode instruction in ARM format. */
581 void (* aencode) (void);
582
583 /* Function to call to encode instruction in Thumb format. */
584 void (* tencode) (void);
585 };
586
587 /* Defines for various bits that we will want to toggle. */
588 #define INST_IMMEDIATE 0x02000000
589 #define OFFSET_REG 0x02000000
590 #define HWOFFSET_IMM 0x00400000
591 #define SHIFT_BY_REG 0x00000010
592 #define PRE_INDEX 0x01000000
593 #define INDEX_UP 0x00800000
594 #define WRITE_BACK 0x00200000
595 #define LDM_TYPE_2_OR_3 0x00400000
596 #define CPSI_MMOD 0x00020000
597
598 #define LITERAL_MASK 0xf000f000
599 #define OPCODE_MASK 0xfe1fffff
600 #define V4_STR_BIT 0x00000020
601
602 #define T2_SUBS_PC_LR 0xf3de8f00
603
604 #define DATA_OP_SHIFT 21
605
606 #define T2_OPCODE_MASK 0xfe1fffff
607 #define T2_DATA_OP_SHIFT 21
608
609 /* Codes to distinguish the arithmetic instructions. */
610 #define OPCODE_AND 0
611 #define OPCODE_EOR 1
612 #define OPCODE_SUB 2
613 #define OPCODE_RSB 3
614 #define OPCODE_ADD 4
615 #define OPCODE_ADC 5
616 #define OPCODE_SBC 6
617 #define OPCODE_RSC 7
618 #define OPCODE_TST 8
619 #define OPCODE_TEQ 9
620 #define OPCODE_CMP 10
621 #define OPCODE_CMN 11
622 #define OPCODE_ORR 12
623 #define OPCODE_MOV 13
624 #define OPCODE_BIC 14
625 #define OPCODE_MVN 15
626
627 #define T2_OPCODE_AND 0
628 #define T2_OPCODE_BIC 1
629 #define T2_OPCODE_ORR 2
630 #define T2_OPCODE_ORN 3
631 #define T2_OPCODE_EOR 4
632 #define T2_OPCODE_ADD 8
633 #define T2_OPCODE_ADC 10
634 #define T2_OPCODE_SBC 11
635 #define T2_OPCODE_SUB 13
636 #define T2_OPCODE_RSB 14
637
638 #define T_OPCODE_MUL 0x4340
639 #define T_OPCODE_TST 0x4200
640 #define T_OPCODE_CMN 0x42c0
641 #define T_OPCODE_NEG 0x4240
642 #define T_OPCODE_MVN 0x43c0
643
644 #define T_OPCODE_ADD_R3 0x1800
645 #define T_OPCODE_SUB_R3 0x1a00
646 #define T_OPCODE_ADD_HI 0x4400
647 #define T_OPCODE_ADD_ST 0xb000
648 #define T_OPCODE_SUB_ST 0xb080
649 #define T_OPCODE_ADD_SP 0xa800
650 #define T_OPCODE_ADD_PC 0xa000
651 #define T_OPCODE_ADD_I8 0x3000
652 #define T_OPCODE_SUB_I8 0x3800
653 #define T_OPCODE_ADD_I3 0x1c00
654 #define T_OPCODE_SUB_I3 0x1e00
655
656 #define T_OPCODE_ASR_R 0x4100
657 #define T_OPCODE_LSL_R 0x4080
658 #define T_OPCODE_LSR_R 0x40c0
659 #define T_OPCODE_ROR_R 0x41c0
660 #define T_OPCODE_ASR_I 0x1000
661 #define T_OPCODE_LSL_I 0x0000
662 #define T_OPCODE_LSR_I 0x0800
663
664 #define T_OPCODE_MOV_I8 0x2000
665 #define T_OPCODE_CMP_I8 0x2800
666 #define T_OPCODE_CMP_LR 0x4280
667 #define T_OPCODE_MOV_HR 0x4600
668 #define T_OPCODE_CMP_HR 0x4500
669
670 #define T_OPCODE_LDR_PC 0x4800
671 #define T_OPCODE_LDR_SP 0x9800
672 #define T_OPCODE_STR_SP 0x9000
673 #define T_OPCODE_LDR_IW 0x6800
674 #define T_OPCODE_STR_IW 0x6000
675 #define T_OPCODE_LDR_IH 0x8800
676 #define T_OPCODE_STR_IH 0x8000
677 #define T_OPCODE_LDR_IB 0x7800
678 #define T_OPCODE_STR_IB 0x7000
679 #define T_OPCODE_LDR_RW 0x5800
680 #define T_OPCODE_STR_RW 0x5000
681 #define T_OPCODE_LDR_RH 0x5a00
682 #define T_OPCODE_STR_RH 0x5200
683 #define T_OPCODE_LDR_RB 0x5c00
684 #define T_OPCODE_STR_RB 0x5400
685
686 #define T_OPCODE_PUSH 0xb400
687 #define T_OPCODE_POP 0xbc00
688
689 #define T_OPCODE_BRANCH 0xe000
690
691 #define THUMB_SIZE 2 /* Size of thumb instruction. */
692 #define THUMB_PP_PC_LR 0x0100
693 #define THUMB_LOAD_BIT 0x0800
694 #define THUMB2_LOAD_BIT 0x00100000
695
/* Common diagnostic strings assigned to inst.error.  Note: no trailing
   semicolons inside the definitions — the expansion must be usable as an
   expression.  (BAD_ADDR_MODE previously carried a stray ';', which broke
   any non-statement use of the macro.)  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
715
716 static struct hash_control * arm_ops_hsh;
717 static struct hash_control * arm_cond_hsh;
718 static struct hash_control * arm_shift_hsh;
719 static struct hash_control * arm_psr_hsh;
720 static struct hash_control * arm_v7m_psr_hsh;
721 static struct hash_control * arm_reg_hsh;
722 static struct hash_control * arm_reloc_hsh;
723 static struct hash_control * arm_barrier_opt_hsh;
724
725 /* Stuff needed to resolve the label ambiguity
726 As:
727 ...
728 label: <insn>
729 may differ from:
730 ...
731 label:
732 <insn> */
733
734 symbolS * last_label_seen;
735 static int label_is_thumb_function_name = FALSE;
736
737 /* Literal pool structure. Held on a per-section
738 and per-sub-section basis. */
739
740 #define MAX_LITERAL_POOL_SIZE 1024
741 typedef struct literal_pool
742 {
743 expressionS literals [MAX_LITERAL_POOL_SIZE];
744 unsigned int next_free_entry;
745 unsigned int id;
746 symbolS * symbol;
747 segT section;
748 subsegT sub_section;
749 struct literal_pool * next;
750 } literal_pool;
751
752 /* Pointer to a linked list of literal pools. */
753 literal_pool * list_of_pools = NULL;
754
755 #ifdef OBJ_ELF
756 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
757 #else
758 static struct current_it now_it;
759 #endif
760
761 static inline int
762 now_it_compatible (int cond)
763 {
764 return (cond & ~1) == (now_it.cc & ~1);
765 }
766
767 static inline int
768 conditional_insn (void)
769 {
770 return inst.cond != COND_ALWAYS;
771 }
772
773 static int in_it_block (void);
774
775 static int handle_it_state (void);
776
777 static void force_automatic_it_block_close (void);
778
779 static void it_fsm_post_encode (void);
780
781 #define set_it_insn_type(type) \
782 do \
783 { \
784 inst.it_insn_type = type; \
785 if (handle_it_state () == FAIL) \
786 return; \
787 } \
788 while (0)
789
790 #define set_it_insn_type_nonvoid(type, failret) \
791 do \
792 { \
793 inst.it_insn_type = type; \
794 if (handle_it_state () == FAIL) \
795 return failret; \
796 } \
797 while(0)
798
799 #define set_it_insn_type_last() \
800 do \
801 { \
802 if (inst.cond == COND_ALWAYS) \
803 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
804 else \
805 set_it_insn_type (INSIDE_IT_LAST_INSN); \
806 } \
807 while (0)
808
809 /* Pure syntax. */
810
811 /* This array holds the chars that always start a comment. If the
812 pre-processor is disabled, these aren't very useful. */
813 const char comment_chars[] = "@";
814
815 /* This array holds the chars that only start a comment at the beginning of
816 a line. If the line seems to have the form '# 123 filename'
817 .line and .file directives will appear in the pre-processed output. */
818 /* Note that input_file.c hand checks for '#' at the beginning of the
819 first line of the input file. This is because the compiler outputs
820 #NO_APP at the beginning of its output. */
821 /* Also note that comments like this one will always work. */
822 const char line_comment_chars[] = "#";
823
824 const char line_separator_chars[] = ";";
825
826 /* Chars that can be used to separate mant
827 from exp in floating point numbers. */
828 const char EXP_CHARS[] = "eE";
829
830 /* Chars that mean this number is a floating point constant. */
831 /* As in 0f12.456 */
832 /* or 0d1.2345e12 */
833
834 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
835
836 /* Prefix characters that indicate the start of an immediate
837 value. */
838 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
839
840 /* Separator character handling. */
841
842 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
843
844 static inline int
845 skip_past_char (char ** str, char c)
846 {
847 if (**str == c)
848 {
849 (*str)++;
850 return SUCCESS;
851 }
852 else
853 return FAIL;
854 }
855
856 #define skip_past_comma(str) skip_past_char (str, ',')
857
858 /* Arithmetic expressions (possibly involving symbols). */
859
860 /* Return TRUE if anything in the expression is a bignum. */
861
862 static int
863 walk_no_bignums (symbolS * sp)
864 {
865 if (symbol_get_value_expression (sp)->X_op == O_big)
866 return 1;
867
868 if (symbol_get_value_expression (sp)->X_add_symbol)
869 {
870 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
871 || (symbol_get_value_expression (sp)->X_op_symbol
872 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
873 }
874
875 return 0;
876 }
877
878 static int in_my_get_expression = 0;
879
880 /* Third argument to my_get_expression. */
881 #define GE_NO_PREFIX 0
882 #define GE_IMM_PREFIX 1
883 #define GE_OPT_PREFIX 2
884 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
885 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
886 #define GE_OPT_PREFIX_BIG 3
887
/* Parse an (possibly #/$-prefixed) expression at *STR into EP, honouring
   PREFIX_MODE (one of the GE_* values above).  On success return 0 and
   advance *STR past the expression; on failure set inst.error and return
   nonzero.  Temporarily redirects input_line_pointer so the generic
   expression () machinery can be used; it is always restored on exit.
   NOTE(review): the GE_IMM_PREFIX failure path returns FAIL (-1) while
   the other failure paths return 1 — callers appear to test only for
   nonzero, but confirm before relying on the exact value.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT   seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our string; in_my_get_expression
     lets md_operand () know the context of the call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
973
974 /* Turn a string in input_line_pointer into a floating point constant
975 of type TYPE, and store the appropriate bytes in *LITP. The number
976 of LITTLENUMS emitted is stored in *SIZEP. An error message is
977 returned, or NULL on OK.
978
979 Note that fp constants aren't represent in the normal way on the ARM.
980 In big endian mode, things are as expected. However, in little endian
981 mode fp constants are big-endian word-wise, and little-endian byte-wise
982 within the words. For example, (double) 1.1 in big endian mode is
983 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
984 the byte sequence 99 99 f1 3f 9a 99 99 99.
985
986 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
987
988 char *
989 md_atof (int type, char * litP, int * sizeP)
990 {
991 int prec;
992 LITTLENUM_TYPE words[MAX_LITTLENUMS];
993 char *t;
994 int i;
995
996 switch (type)
997 {
998 case 'f':
999 case 'F':
1000 case 's':
1001 case 'S':
1002 prec = 2;
1003 break;
1004
1005 case 'd':
1006 case 'D':
1007 case 'r':
1008 case 'R':
1009 prec = 4;
1010 break;
1011
1012 case 'x':
1013 case 'X':
1014 prec = 5;
1015 break;
1016
1017 case 'p':
1018 case 'P':
1019 prec = 5;
1020 break;
1021
1022 default:
1023 *sizeP = 0;
1024 return _("Unrecognized or unsupported floating point constant");
1025 }
1026
1027 t = atof_ieee (input_line_pointer, type, words);
1028 if (t)
1029 input_line_pointer = t;
1030 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1031
1032 if (target_big_endian)
1033 {
1034 for (i = 0; i < prec; i++)
1035 {
1036 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1037 litP += sizeof (LITTLENUM_TYPE);
1038 }
1039 }
1040 else
1041 {
1042 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1043 for (i = prec - 1; i >= 0; i--)
1044 {
1045 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1046 litP += sizeof (LITTLENUM_TYPE);
1047 }
1048 else
1049 /* For a 4 byte float the order of elements in `words' is 1 0.
1050 For an 8 byte float the order is 1 0 3 2. */
1051 for (i = 0; i < prec; i += 2)
1052 {
1053 md_number_to_chars (litP, (valueT) words[i + 1],
1054 sizeof (LITTLENUM_TYPE));
1055 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1056 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1057 litP += 2 * sizeof (LITTLENUM_TYPE);
1058 }
1059 }
1060
1061 return NULL;
1062 }
1063
1064 /* We handle all bad expressions here, so that we can report the faulty
1065 instruction in the error message. */
1066 void
1067 md_operand (expressionS * exp)
1068 {
1069 if (in_my_get_expression)
1070 exp->X_op = O_illegal;
1071 }
1072
1073 /* Immediate values. */
1074
1075 /* Generic immediate-value read function for use in directives.
1076 Accepts anything that 'expression' can fold to a constant.
1077 *val receives the number. */
1078 #ifdef OBJ_ELF
1079 static int
1080 immediate_for_directive (int *val)
1081 {
1082 expressionS exp;
1083 exp.X_op = O_illegal;
1084
1085 if (is_immediate_prefix (*input_line_pointer))
1086 {
1087 input_line_pointer++;
1088 expression (&exp);
1089 }
1090
1091 if (exp.X_op != O_constant)
1092 {
1093 as_bad (_("expected #constant"));
1094 ignore_rest_of_line ();
1095 return FAIL;
1096 }
1097 *val = exp.X_add_number;
1098 return SUCCESS;
1099 }
1100 #endif
1101
1102 /* Register parsing. */
1103
1104 /* Generic register parser. CCP points to what should be the
1105 beginning of a register name. If it is indeed a valid register
1106 name, advance CCP over it and return the reg_entry structure;
1107 otherwise return NULL. Does not issue diagnostics. */
1108
1109 static struct reg_entry *
1110 arm_reg_parse_multi (char **ccp)
1111 {
1112 char *start = *ccp;
1113 char *p;
1114 struct reg_entry *reg;
1115
1116 #ifdef REGISTER_PREFIX
1117 if (*start != REGISTER_PREFIX)
1118 return NULL;
1119 start++;
1120 #endif
1121 #ifdef OPTIONAL_REGISTER_PREFIX
1122 if (*start == OPTIONAL_REGISTER_PREFIX)
1123 start++;
1124 #endif
1125
1126 p = start;
1127 if (!ISALPHA (*p) || !is_name_beginner (*p))
1128 return NULL;
1129
1130 do
1131 p++;
1132 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1133
1134 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1135
1136 if (!reg)
1137 return NULL;
1138
1139 *ccp = p;
1140 return reg;
1141 }
1142
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no break here, so when the bare-number
	 parse fails control falls through into the REG_TYPE_MMXWC case
	 below.  That lets a WCG register satisfy a REG_TYPE_CP request;
	 confirm this is intended and annotate with an explicit
	 fall-through marker.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1180
1181 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1182 return value is the register number or FAIL. */
1183
1184 static int
1185 arm_reg_parse (char **ccp, enum arm_reg_type type)
1186 {
1187 char *start = *ccp;
1188 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1189 int ret;
1190
1191 /* Do not allow a scalar (reg+index) to parse as a register. */
1192 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1193 return FAIL;
1194
1195 if (reg && reg->type == type)
1196 return reg->number;
1197
1198 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1199 return ret;
1200
1201 *ccp = start;
1202 return FAIL;
1203 }
1204
1205 /* Parse a Neon type specifier. *STR should point at the leading '.'
1206 character. Does no verification at this stage that the type fits the opcode
1207 properly. E.g.,
1208
1209 .i32.i32.s16
1210 .s32.f32
1211 .u16
1212
1213 Can all be legally parsed by this function.
1214
1215 Fills in neon_type struct pointer with parsed information, and updates STR
1216 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1217 type, FAIL if not. */
1218
1219 static int
1220 parse_neon_type (struct neon_type *type, char **str)
1221 {
1222 char *ptr = *str;
1223
1224 if (type)
1225 type->elems = 0;
1226
1227 while (type->elems < NEON_MAX_TYPE_ELS)
1228 {
1229 enum neon_el_type thistype = NT_untyped;
1230 unsigned thissize = -1u;
1231
1232 if (*ptr != '.')
1233 break;
1234
1235 ptr++;
1236
1237 /* Just a size without an explicit type. */
1238 if (ISDIGIT (*ptr))
1239 goto parsesize;
1240
1241 switch (TOLOWER (*ptr))
1242 {
1243 case 'i': thistype = NT_integer; break;
1244 case 'f': thistype = NT_float; break;
1245 case 'p': thistype = NT_poly; break;
1246 case 's': thistype = NT_signed; break;
1247 case 'u': thistype = NT_unsigned; break;
1248 case 'd':
1249 thistype = NT_float;
1250 thissize = 64;
1251 ptr++;
1252 goto done;
1253 default:
1254 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1255 return FAIL;
1256 }
1257
1258 ptr++;
1259
1260 /* .f is an abbreviation for .f32. */
1261 if (thistype == NT_float && !ISDIGIT (*ptr))
1262 thissize = 32;
1263 else
1264 {
1265 parsesize:
1266 thissize = strtoul (ptr, &ptr, 10);
1267
1268 if (thissize != 8 && thissize != 16 && thissize != 32
1269 && thissize != 64)
1270 {
1271 as_bad (_("bad size %d in type specifier"), thissize);
1272 return FAIL;
1273 }
1274 }
1275
1276 done:
1277 if (type)
1278 {
1279 type->el[type->elems].type = thistype;
1280 type->el[type->elems].size = thissize;
1281 type->elems++;
1282 }
1283 }
1284
1285 /* Empty/missing type is not a successful parse. */
1286 if (type->elems == 0)
1287 return FAIL;
1288
1289 *str = ptr;
1290
1291 return SUCCESS;
1292 }
1293
1294 /* Errors may be set multiple times during parsing or bit encoding
1295 (particularly in the Neon bits), but usually the earliest error which is set
1296 will be the most meaningful. Avoid overwriting it with later (cascading)
1297 errors by calling this function. */
1298
1299 static void
1300 first_error (const char *err)
1301 {
1302 if (!inst.error)
1303 inst.error = err;
1304 }
1305
1306 /* Parse a single type, e.g. ".s32", leading period included. */
1307 static int
1308 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1309 {
1310 char *str = *ccp;
1311 struct neon_type optype;
1312
1313 if (*str == '.')
1314 {
1315 if (parse_neon_type (&optype, &str) == SUCCESS)
1316 {
1317 if (optype.elems == 1)
1318 *vectype = optype.el[0];
1319 else
1320 {
1321 first_error (_("only one type should be specified for operand"));
1322 return FAIL;
1323 }
1324 }
1325 else
1326 {
1327 first_error (_("vector type expected"));
1328 return FAIL;
1329 }
1330 }
1331 else
1332 return FAIL;
1333
1334 *ccp = str;
1335
1336 return SUCCESS;
1337 }
1338
1339 /* Special meanings for indices (which have a range of 0-7), which will fit into
1340 a 4-bit integer. */
1341
1342 #define NEON_ALL_LANES 15
1343 #define NEON_INTERLEAVE_LANES 14
1344
1345 /* Parse either a register or a scalar, with an optional type. Return the
1346 register number, and optionally fill in the actual type of the register
1347 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1348 type/index information in *TYPEINFO. */
1349
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index record.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* After narrowing, the parsed register must have the requested type.  */
  if (type != reg->type)
    return FAIL;

  /* A typed alias (.dn/.qn) contributes its stored type/index.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix, e.g. "d0.s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* An optional scalar index: "[n]" or "[]" (all lanes).  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes; otherwise a constant lane number.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1453
1454 /* Like arm_reg_parse, but allow allow the following extra features:
1455 - If RTYPE is non-zero, return the (possibly restricted) type of the
1456 register (e.g. Neon double or quad reg when either has been requested).
1457 - If this is a Neon vector type with additional type information, fill
1458 in the struct pointed to by VECTYPE (if non-NULL).
1459 This function will fault on encountering a scalar. */
1460
1461 static int
1462 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1463 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1464 {
1465 struct neon_typed_alias atype;
1466 char *str = *ccp;
1467 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1468
1469 if (reg == FAIL)
1470 return FAIL;
1471
1472 /* Do not allow a scalar (reg+index) to parse as a register. */
1473 if ((atype.defined & NTA_HASINDEX) != 0)
1474 {
1475 first_error (_("register operand expected, but got scalar"));
1476 return FAIL;
1477 }
1478
1479 if (vectype)
1480 *vectype = atype.eltype;
1481
1482 *ccp = str;
1483
1484 return reg;
1485 }
1486
/* Decode the value returned by parse_scalar (reg * 16 + index).  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1489
1490 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1491 have enough information to be able to do a good job bounds-checking. So, we
1492 just do easy checks here, and do further checks later. */
1493
1494 static int
1495 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1496 {
1497 int reg;
1498 char *str = *ccp;
1499 struct neon_typed_alias atype;
1500
1501 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1502
1503 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1504 return FAIL;
1505
1506 if (atype.index == NEON_ALL_LANES)
1507 {
1508 first_error (_("scalar must have an index"));
1509 return FAIL;
1510 }
1511 else if (atype.index >= 64 / elsize)
1512 {
1513 first_error (_("scalar index out of range"));
1514 return FAIL;
1515 }
1516
1517 if (type)
1518 *type = atype.eltype;
1519
1520 *ccp = str;
1521
1522 return reg * 16 + atype.index;
1523 }
1524
1525 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1526
1527 static long
1528 parse_reg_list (char ** strp)
1529 {
1530 char * str = * strp;
1531 long range = 0;
1532 int another_range;
1533
1534 /* We come back here if we get ranges concatenated by '+' or '|'. */
1535 do
1536 {
1537 another_range = 0;
1538
1539 if (*str == '{')
1540 {
1541 int in_range = 0;
1542 int cur_reg = -1;
1543
1544 str++;
1545 do
1546 {
1547 int reg;
1548
1549 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1550 {
1551 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1552 return FAIL;
1553 }
1554
1555 if (in_range)
1556 {
1557 int i;
1558
1559 if (reg <= cur_reg)
1560 {
1561 first_error (_("bad range in register list"));
1562 return FAIL;
1563 }
1564
1565 for (i = cur_reg + 1; i < reg; i++)
1566 {
1567 if (range & (1 << i))
1568 as_tsktsk
1569 (_("Warning: duplicated register (r%d) in register list"),
1570 i);
1571 else
1572 range |= 1 << i;
1573 }
1574 in_range = 0;
1575 }
1576
1577 if (range & (1 << reg))
1578 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1579 reg);
1580 else if (reg <= cur_reg)
1581 as_tsktsk (_("Warning: register range not in ascending order"));
1582
1583 range |= 1 << reg;
1584 cur_reg = reg;
1585 }
1586 while (skip_past_comma (&str) != FAIL
1587 || (in_range = 1, *str++ == '-'));
1588 str--;
1589
1590 if (*str++ != '}')
1591 {
1592 first_error (_("missing `}'"));
1593 return FAIL;
1594 }
1595 }
1596 else
1597 {
1598 expressionS exp;
1599
1600 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1601 return FAIL;
1602
1603 if (exp.X_op == O_constant)
1604 {
1605 if (exp.X_add_number
1606 != (exp.X_add_number & 0x0000ffff))
1607 {
1608 inst.error = _("invalid register mask");
1609 return FAIL;
1610 }
1611
1612 if ((range & exp.X_add_number) != 0)
1613 {
1614 int regno = range & exp.X_add_number;
1615
1616 regno &= -regno;
1617 regno = (1 << regno) - 1;
1618 as_tsktsk
1619 (_("Warning: duplicated register (r%d) in register list"),
1620 regno);
1621 }
1622
1623 range |= exp.X_add_number;
1624 }
1625 else
1626 {
1627 if (inst.reloc.type != 0)
1628 {
1629 inst.error = _("expression too complex");
1630 return FAIL;
1631 }
1632
1633 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1634 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1635 inst.reloc.pc_rel = 0;
1636 }
1637 }
1638
1639 if (*str == '|' || *str == '+')
1640 {
1641 str++;
1642 another_range = 1;
1643 }
1644 }
1645 while (another_range);
1646
1647 *strp = str;
1648 return range;
1649 }
1650
1651 /* Types of registers in a list. */
1652
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (s0-s31).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers; enables extended syntax.  */
};
1659
1660 /* Parse a VFP register list. If the string is invalid return FAIL.
1661 Otherwise return the number of registers, and set PBASE to the first
1662 register. Parses registers of type ETYPE.
1663 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1664 - Q registers can be used to specify pairs of D registers
1665 - { } can be omitted from around a singleton register list
1666 FIXME: This is not implemented, as it would require backtracking in
1667 some cases, e.g.:
1668 vtbl.8 d3,d4,d5
1669 This could be done (the meaning isn't really ambiguous), but doesn't
1670 fit in well with the current parsing framework.
1671 - 32 D registers may be used (also true for VFPv3).
1672 FIXME: Types are ignored in these register lists, which is probably a
1673 bug. */
1674
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;	/* Bitmap of registers seen so far.  */
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Choose the register class; max_regs for D lists is set below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above the valid range; lowered to the smallest register seen.  */
  base_reg = max_regs;

  do
    {
      /* setmask/addregs are 1 for S/D registers; a Q register marks and
	 counts as two D registers.  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound covers a second D register.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark and count every register inside the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1840
1841 /* True if two alias types are the same. */
1842
1843 static bfd_boolean
1844 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1845 {
1846 if (!a && !b)
1847 return TRUE;
1848
1849 if (!a || !b)
1850 return FALSE;
1851
1852 if (a->defined != b->defined)
1853 return FALSE;
1854
1855 if ((a->defined & NTA_HASTYPE) != 0
1856 && (a->eltype.type != b->eltype.type
1857 || a->eltype.size != b->eltype.size))
1858 return FALSE;
1859
1860 if ((a->defined & NTA_HASINDEX) != 0
1861 && (a->index != b->index))
1862 return FALSE;
1863
1864 return TRUE;
1865 }
1866
1867 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1868 The base register is put in *PBASE.
1869 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1870 the return value.
1871 The register stride (minus one) is put in bit 4 of the return value.
1872 Bits [6:5] encode the list length (minus one).
1873 The type of the list elements is put in *ELTYPE, if non-NULL. */
1874
1875 #define NEON_LANE(X) ((X) & 0xf)
1876 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1877 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1878
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register seen, or -1.  */
  int reg_incr = -1;	/* Stride between registers, or -1 if not fixed.  */
  int count = 0;	/* Number of D registers in the list.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember its number and type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: establishes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must keep the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range (a Q register
	     contributes two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list has unit stride.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane [3:0], stride-1 [4] and length-1 [6:5] into the result.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2031
2032 /* Parse an explicit relocation suffix on an expression. This is
2033 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2034 arm_reloc_hsh contains no entries, so this function can only
2035 succeed if there is no () after the word. Returns -1 on error,
2036 BFD_RELOC_UNUSED if there wasn't any suffix. */
2037 static int
2038 parse_reloc (char **str)
2039 {
2040 struct reloc_entry *r;
2041 char *p, *q;
2042
2043 if (**str != '(')
2044 return BFD_RELOC_UNUSED;
2045
2046 p = *str + 1;
2047 q = p;
2048
2049 while (*q && *q != ')' && *q != ',')
2050 q++;
2051 if (*q != ')')
2052 return -1;
2053
2054 if ((r = (struct reloc_entry *)
2055 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2056 return -1;
2057
2058 *str = q + 1;
2059 return r->reloc;
2060 }
2061
2062 /* Directives: register aliases. */
2063
2064 static struct reg_entry *
2065 insert_reg_alias (char *str, int number, int type)
2066 {
2067 struct reg_entry *new_reg;
2068 const char *name;
2069
2070 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2071 {
2072 if (new_reg->builtin)
2073 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2074
2075 /* Only warn about a redefinition if it's not defined as the
2076 same register. */
2077 else if (new_reg->number != number || new_reg->type != type)
2078 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2079
2080 return NULL;
2081 }
2082
2083 name = xstrdup (str);
2084 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2085
2086 new_reg->name = name;
2087 new_reg->number = number;
2088 new_reg->type = type;
2089 new_reg->builtin = FALSE;
2090 new_reg->neon = NULL;
2091
2092 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2093 abort ();
2094
2095 return new_reg;
2096 }
2097
2098 static void
2099 insert_neon_reg_alias (char *str, int number, int type,
2100 struct neon_typed_alias *atype)
2101 {
2102 struct reg_entry *reg = insert_reg_alias (str, number, type);
2103
2104 if (!reg)
2105 {
2106 first_error (_("attempt to redefine typed alias"));
2107 return;
2108 }
2109
2110 if (atype)
2111 {
2112 reg->neon = (struct neon_typed_alias *)
2113 xmalloc (sizeof (struct neon_typed_alias));
2114 *reg->neon = *atype;
2115 }
2116 }
2117
2118 /* Look for the .req directive. This is of the form:
2119
2120 new_register_name .req existing_register_name
2121
2122 If we find one, or if it looks sufficiently like one that we want to
2123 handle any error here, return TRUE. Otherwise return FALSE. */
2124
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: we handled the directive, even though it failed.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NOTE(review): alloca -- alias names are expected to be short, but
     an unusually long name grows the stack unboundedly; confirm the
     scrubber limits identifier length.  */
  nbuf = (char *) alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case variant if it differs from the stated name.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    return TRUE;
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}
2195
2196 /* Create a Neon typed/indexed register alias using directives, e.g.:
2197 X .dn d5.s32[1]
2198 Y .qn 6.s16
2199 Z .dn d7
2200 T .dn Z[0]
2201 These typed registers can be used instead of the types specified after the
2202 Neon mnemonic, so long as all operands given have types. Types can also be
2203 specified directly, e.g.:
2204 vadd d0.s32, d1.s32, d2.s32 */
2205
2206 static bfd_boolean
2207 create_neon_reg_alias (char *newname, char *p)
2208 {
2209 enum arm_reg_type basetype;
2210 struct reg_entry *basereg;
2211 struct reg_entry mybasereg;
2212 struct neon_type ntype;
2213 struct neon_typed_alias typeinfo;
2214 char *namebuf, *nameend;
2215 int namelen;
2216
2217 typeinfo.defined = 0;
2218 typeinfo.eltype.type = NT_invtype;
2219 typeinfo.eltype.size = -1;
2220 typeinfo.index = -1;
2221
2222 nameend = p;
2223
2224 if (strncmp (p, " .dn ", 5) == 0)
2225 basetype = REG_TYPE_VFD;
2226 else if (strncmp (p, " .qn ", 5) == 0)
2227 basetype = REG_TYPE_NQ;
2228 else
2229 return FALSE;
2230
2231 p += 5;
2232
2233 if (*p == '\0')
2234 return FALSE;
2235
2236 basereg = arm_reg_parse_multi (&p);
2237
2238 if (basereg && basereg->type != basetype)
2239 {
2240 as_bad (_("bad type for register"));
2241 return FALSE;
2242 }
2243
2244 if (basereg == NULL)
2245 {
2246 expressionS exp;
2247 /* Try parsing as an integer. */
2248 my_get_expression (&exp, &p, GE_NO_PREFIX);
2249 if (exp.X_op != O_constant)
2250 {
2251 as_bad (_("expression must be constant"));
2252 return FALSE;
2253 }
2254 basereg = &mybasereg;
2255 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2256 : exp.X_add_number;
2257 basereg->neon = 0;
2258 }
2259
2260 if (basereg->neon)
2261 typeinfo = *basereg->neon;
2262
2263 if (parse_neon_type (&ntype, &p) == SUCCESS)
2264 {
2265 /* We got a type. */
2266 if (typeinfo.defined & NTA_HASTYPE)
2267 {
2268 as_bad (_("can't redefine the type of a register alias"));
2269 return FALSE;
2270 }
2271
2272 typeinfo.defined |= NTA_HASTYPE;
2273 if (ntype.elems != 1)
2274 {
2275 as_bad (_("you must specify a single type only"));
2276 return FALSE;
2277 }
2278 typeinfo.eltype = ntype.el[0];
2279 }
2280
2281 if (skip_past_char (&p, '[') == SUCCESS)
2282 {
2283 expressionS exp;
2284 /* We got a scalar index. */
2285
2286 if (typeinfo.defined & NTA_HASINDEX)
2287 {
2288 as_bad (_("can't redefine the index of a scalar alias"));
2289 return FALSE;
2290 }
2291
2292 my_get_expression (&exp, &p, GE_NO_PREFIX);
2293
2294 if (exp.X_op != O_constant)
2295 {
2296 as_bad (_("scalar index must be constant"));
2297 return FALSE;
2298 }
2299
2300 typeinfo.defined |= NTA_HASINDEX;
2301 typeinfo.index = exp.X_add_number;
2302
2303 if (skip_past_char (&p, ']') == FAIL)
2304 {
2305 as_bad (_("expecting ]"));
2306 return FALSE;
2307 }
2308 }
2309
2310 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2311 the desired alias name, and p points to its end. If not, then
2312 the desired alias name is in the global original_case_string. */
2313 #ifdef TC_CASE_SENSITIVE
2314 namelen = nameend - newname;
2315 #else
2316 newname = original_case_string;
2317 namelen = strlen (newname);
2318 #endif
2319
2320 namebuf = (char *) alloca (namelen + 1);
2321 strncpy (namebuf, newname, namelen);
2322 namebuf[namelen] = '\0';
2323
2324 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2325 typeinfo.defined != 0 ? &typeinfo : NULL);
2326
2327 /* Insert name in all uppercase. */
2328 for (p = namebuf; *p; p++)
2329 *p = TOUPPER (*p);
2330
2331 if (strncmp (namebuf, newname, namelen))
2332 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2333 typeinfo.defined != 0 ? &typeinfo : NULL);
2334
2335 /* Insert name in all lowercase. */
2336 for (p = namebuf; *p; p++)
2337 *p = TOLOWER (*p);
2338
2339 if (strncmp (namebuf, newname, namelen))
2340 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2341 typeinfo.defined != 0 ? &typeinfo : NULL);
2342
2343 return TRUE;
2344 }
2345
/* Handler for a line starting with ".req".  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2354
/* Like s_req: ".dn" must follow the alias name, so reaching this
   handler means the directive appeared at the start of a line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2360
/* Like s_req: ".qn" must follow the alias name, so reaching this
   handler means the directive appeared at the start of a line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2366
2367 /* The .unreq directive deletes an alias which was previously defined
2368 by .req. For example:
2369
2370 my_alias .req r11
2371 .unreq my_alias */
2372
2373 static void
2374 s_unreq (int a ATTRIBUTE_UNUSED)
2375 {
2376 char * name;
2377 char saved_char;
2378
2379 name = input_line_pointer;
2380
2381 while (*input_line_pointer != 0
2382 && *input_line_pointer != ' '
2383 && *input_line_pointer != '\n')
2384 ++input_line_pointer;
2385
2386 saved_char = *input_line_pointer;
2387 *input_line_pointer = 0;
2388
2389 if (!*name)
2390 as_bad (_("invalid syntax for .unreq directive"));
2391 else
2392 {
2393 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2394 name);
2395
2396 if (!reg)
2397 as_bad (_("unknown register alias '%s'"), name);
2398 else if (reg->builtin)
2399 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2400 name);
2401 else
2402 {
2403 char * p;
2404 char * nbuf;
2405
2406 hash_delete (arm_reg_hsh, name, FALSE);
2407 free ((char *) reg->name);
2408 if (reg->neon)
2409 free (reg->neon);
2410 free (reg);
2411
2412 /* Also locate the all upper case and all lower case versions.
2413 Do not complain if we cannot find one or the other as it
2414 was probably deleted above. */
2415
2416 nbuf = strdup (name);
2417 for (p = nbuf; *p; p++)
2418 *p = TOUPPER (*p);
2419 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2420 if (reg)
2421 {
2422 hash_delete (arm_reg_hsh, nbuf, FALSE);
2423 free ((char *) reg->name);
2424 if (reg->neon)
2425 free (reg->neon);
2426 free (reg);
2427 }
2428
2429 for (p = nbuf; *p; p++)
2430 *p = TOLOWER (*p);
2431 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2432 if (reg)
2433 {
2434 hash_delete (arm_reg_hsh, nbuf, FALSE);
2435 free ((char *) reg->name);
2436 if (reg->neon)
2437 free (reg->neon);
2438 free (reg);
2439 }
2440
2441 free (nbuf);
2442 }
2443 }
2444
2445 *input_line_pointer = saved_char;
2446 demand_empty_rest_of_line ();
2447 }
2448
2449 /* Directives: Instruction set selection. */
2450
2451 #ifdef OBJ_ELF
2452 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2453 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2454 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2455 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2456
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG, and record it in the frag's first_map /
   last_map bookkeeping.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the canonical AAELF mapping symbol name for STATE.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark $a/$t symbols with the per-symbol ARM/Thumb and interwork
     annotations; $d symbols carry neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Replace, rather than duplicate, a symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2532
2533 /* We must sometimes convert a region marked as code to data during
2534 code alignment, if an odd number of bytes have to be padded. The
2535 code mapping symbol is pushed to an aligned address. */
2536
2537 static void
2538 insert_data_mapping_symbol (enum mstate state,
2539 valueT value, fragS *frag, offsetT bytes)
2540 {
2541 /* If there was already a mapping symbol, remove it. */
2542 if (frag->tc_frag_data.last_map != NULL
2543 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2544 {
2545 symbolS *symp = frag->tc_frag_data.last_map;
2546
2547 if (value == 0)
2548 {
2549 know (frag->tc_frag_data.first_map == symp);
2550 frag->tc_frag_data.first_map = NULL;
2551 }
2552 frag->tc_frag_data.last_map = NULL;
2553 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2554 }
2555
2556 make_mapping_symbol (MAP_DATA, value, frag);
2557 make_mapping_symbol (state, value + bytes, frag);
2558 }
2559
2560 static void mapping_state_2 (enum mstate state, int max_chars);
2561
2562 /* Set the mapping state to STATE. Only call this when about to
2563 emit some STATE bytes to the file. */
2564
2565 void
2566 mapping_state (enum mstate state)
2567 {
2568 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2569
2570 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2571
2572 if (mapstate == state)
2573 /* The mapping symbol has already been emitted.
2574 There is nothing else to do. */
2575 return;
2576 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2577 /* This case will be evaluated later in the next else. */
2578 return;
2579 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2580 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2581 {
2582 /* Only add the symbol if the offset is > 0:
2583 if we're at the first frag, check it's size > 0;
2584 if we're not at the first frag, then for sure
2585 the offset is > 0. */
2586 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2587 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2588
2589 if (add_symbol)
2590 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2591 }
2592
2593 mapping_state_2 (state, 0);
2594 #undef TRANSITION
2595 }
2596
2597 /* Same as mapping_state, but MAX_CHARS bytes have already been
2598 allocated. Put the mapping symbol that far back. */
2599
2600 static void
2601 mapping_state_2 (enum mstate state, int max_chars)
2602 {
2603 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2604
2605 if (!SEG_NORMAL (now_seg))
2606 return;
2607
2608 if (mapstate == state)
2609 /* The mapping symbol has already been emitted.
2610 There is nothing else to do. */
2611 return;
2612
2613 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2614 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2615 }
2616 #else
2617 #define mapping_state(x) ((void)0)
2618 #define mapping_state_2(x, y) ((void)0)
2619 #endif
2620
2621 /* Find the real, Thumb encoded start of a Thumb function. */
2622
2623 #ifdef OBJ_COFF
2624 static symbolS *
2625 find_real_start (symbolS * symbolP)
2626 {
2627 char * real_start;
2628 const char * name = S_GET_NAME (symbolP);
2629 symbolS * new_target;
2630
2631 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2632 #define STUB_NAME ".real_start_of"
2633
2634 if (name == NULL)
2635 abort ();
2636
2637 /* The compiler may generate BL instructions to local labels because
2638 it needs to perform a branch to a far away location. These labels
2639 do not have a corresponding ".real_start_of" label. We check
2640 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2641 the ".real_start_of" convention for nonlocal branches. */
2642 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2643 return symbolP;
2644
2645 real_start = ACONCAT ((STUB_NAME, name, NULL));
2646 new_target = symbol_find (real_start);
2647
2648 if (new_target == NULL)
2649 {
2650 as_warn (_("Failed to find real start of function: %s\n"), name);
2651 new_target = symbolP;
2652 }
2653
2654 return new_target;
2655 }
2656 #endif
2657
/* Switch the assembler between Thumb (WIDTH == 16) and ARM
   (WIDTH == 32) encoding, diagnosing processors that lack the
   requested instruction set.  */

static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* Pad with zero bytes to a word boundary so the following
	     ARM instruction is correctly aligned.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
2695
/* Implement the ".arm" directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2702
/* Implement the ".thumb" directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2709
2710 static void
2711 s_code (int unused ATTRIBUTE_UNUSED)
2712 {
2713 int temp;
2714
2715 temp = get_absolute_expression ();
2716 switch (temp)
2717 {
2718 case 16:
2719 case 32:
2720 opcode_select (temp);
2721 break;
2722
2723 default:
2724 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2725 }
2726 }
2727
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): the value 2 (rather than 1) appears to mark
	 "forced" Thumb mode — confirm against other readers of
	 thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2744
/* Implement ".thumb_func": switch to Thumb mode and flag the next
   label as the name of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2754
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  EQUIV is non-zero for .thumb_equiv-style
   behaviour: complain if the symbol is already defined.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Re-terminate the name only for the error message, then put the
	 delimiter back before skipping the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Assign the value (the rest of the line) to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2844
2845 /* Directives: Mode selection. */
2846
2847 /* .syntax [unified|divided] - choose the new unified syntax
2848 (same for Arm and Thumb encoding, modulo slight differences in what
2849 can be represented) or the old divergent syntax for each mode. */
2850 static void
2851 s_syntax (int unused ATTRIBUTE_UNUSED)
2852 {
2853 char *name, delim;
2854
2855 name = input_line_pointer;
2856 delim = get_symbol_end ();
2857
2858 if (!strcasecmp (name, "unified"))
2859 unified_syntax = TRUE;
2860 else if (!strcasecmp (name, "divided"))
2861 unified_syntax = FALSE;
2862 else
2863 {
2864 as_bad (_("unrecognized syntax mode \"%s\""), name);
2865 return;
2866 }
2867 *input_line_pointer = delim;
2868 demand_empty_rest_of_line ();
2869 }
2870
2871 /* Directives: sectioning and alignment. */
2872
2873 /* Same as s_align_ptwo but align 0 => align 2. */
2874
2875 static void
2876 s_align (int unused ATTRIBUTE_UNUSED)
2877 {
2878 int temp;
2879 bfd_boolean fill_p;
2880 long temp_fill;
2881 long max_alignment = 15;
2882
2883 temp = get_absolute_expression ();
2884 if (temp > max_alignment)
2885 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2886 else if (temp < 0)
2887 {
2888 as_bad (_("alignment negative. 0 assumed."));
2889 temp = 0;
2890 }
2891
2892 if (*input_line_pointer == ',')
2893 {
2894 input_line_pointer++;
2895 temp_fill = get_absolute_expression ();
2896 fill_p = TRUE;
2897 }
2898 else
2899 {
2900 fill_p = FALSE;
2901 temp_fill = 0;
2902 }
2903
2904 if (!temp)
2905 temp = 2;
2906
2907 /* Only make a frag if we HAVE to. */
2908 if (temp && !need_pass_2)
2909 {
2910 if (!fill_p && subseg_text_p (now_seg))
2911 frag_align_code (temp, 0);
2912 else
2913 frag_align (temp, (int) temp_fill, 0);
2914 }
2915 demand_empty_rest_of_line ();
2916
2917 record_alignment (now_seg, temp);
2918 }
2919
/* Implement the ".bss" directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2932
/* Implement ".even": align the output to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2944
2945 /* Directives: Literal pools. */
2946
2947 static literal_pool *
2948 find_literal_pool (void)
2949 {
2950 literal_pool * pool;
2951
2952 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2953 {
2954 if (pool->section == now_seg
2955 && pool->sub_section == now_subseg)
2956 break;
2957 }
2958
2959 return pool;
2960 }
2961
2962 static literal_pool *
2963 find_or_make_literal_pool (void)
2964 {
2965 /* Next literal pool ID number. */
2966 static unsigned int latest_pool_num = 1;
2967 literal_pool * pool;
2968
2969 pool = find_literal_pool ();
2970
2971 if (pool == NULL)
2972 {
2973 /* Create a new pool. */
2974 pool = (literal_pool *) xmalloc (sizeof (* pool));
2975 if (! pool)
2976 return NULL;
2977
2978 pool->next_free_entry = 0;
2979 pool->section = now_seg;
2980 pool->sub_section = now_subseg;
2981 pool->next = list_of_pools;
2982 pool->symbol = NULL;
2983
2984 /* Add it to the list. */
2985 list_of_pools = pool;
2986 }
2987
2988 /* New pools, and emptied pools, will have a NULL symbol. */
2989 if (pool->symbol == NULL)
2990 {
2991 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2992 (valueT) 0, &zero_address_frag);
2993 pool->id = latest_pool_num ++;
2994 }
2995
2996 /* Done. */
2997 return pool;
2998 }
2999
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  Existing identical entries
   are reused.  On success the instruction's relocation expression is
   rewritten to reference the pool symbol plus the entry's byte offset.
   Returns SUCCESS or FAIL (pool full).  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Constants match on value and signedness.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* Symbolic expressions match on addend and both symbols.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Redirect the instruction to load from the pool: each entry
     occupies 4 bytes.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3052
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.
   Gives the already-created SYMBOLP its NAME, SEGMENT, VALUE and FRAG,
   and appends it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns stable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3103
3104
/* Implement ".ltorg": dump the pending literal pool for the current
   (section, subsection) at this point in the output, word-aligned,
   then mark the pool as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Pool contents are data, not instructions.  */
  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* NOTE(review): the \002 byte in the name presumably keeps this
     generated label out of the user namespace — confirm.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Bind the pool's placeholder symbol to the current location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3147
3148 #ifdef OBJ_ELF
3149 /* Forward declarations for functions below, in the MD interface
3150 section. */
3151 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3152 static valueT create_unwind_entry (int);
3153 static void start_unwind_section (const segT, int);
3154 static void add_unwind_opcode (valueT, int);
3155 static void flush_pending_unwind (void);
3156
3157 /* Directives: Data. */
3158
/* Emit comma-separated data items of NBYTES each, allowing an optional
   "(reloc)" suffix on symbolic operands which selects an explicit
   relocation type for that item.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry a relocation suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  /* Splice the "(reloc)" text out of the input buffer,
		     reparse the full expression, then restore the
		     original buffer contents.  */
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup in the low-order SIZE bytes of the
		     NBYTES-wide datum.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3248
3249 /* Emit an expression containing a 32-bit thumb instruction.
3250 Implementation based on put_thumb32_insn. */
3251
3252 static void
3253 emit_thumb32_expr (expressionS * exp)
3254 {
3255 expressionS exp_high = *exp;
3256
3257 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3258 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3259 exp->X_add_number &= 0xffff;
3260 emit_expr (exp, (unsigned int) THUMB_SIZE);
3261 }
3262
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit encoding, 4 for a 32-bit encoding, and 0 when the value is
   ambiguous.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;
  if (op >= 0xe8000000u)
    return 4;
  return 0;
}
3275
/* Emit one .inst operand EXP.  NBYTES is the width requested by the
   directive (0 means deduce it from the opcode via thumb_insn_size).
   Returns TRUE iff something was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width: guess 2 or 4 bytes from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine in step with this
		 hand-encoded instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb insn is
		 emitted as two halfwords, high halfword first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3320
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  NBYTES is the width
   from the directive suffix; it must be 0 in ARM mode, where every
   instruction is 4 bytes.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      /* ARM instructions are always 4 bytes wide.  */
      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3370
/* Parse a .rel31 directive: emit a 32-bit word whose top bit comes
   from the first operand (0 or 1) and whose low 31 bits are filled in
   by a place-relative PREL31 relocation against the second operand.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* The emitted word is data, not code.  */
  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3410
3411 /* Directives: AEABI stack-unwind tables. */
3412
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets the per-function unwind state.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      /* Nested/repeated .fnstart without an intervening .fnend.  */
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3439
3440
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function, so that the handler data that follows the directive is
   placed after it.  Diagnostics here do not stop processing: the entry is
   still created so assembly can continue.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Non-zero argument distinguishes this case from the entry generated
     by .fnend when no .handlerdata was seen.  */
  create_unwind_entry (1);
}
3456
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when create_unwind_entry
     returned an inline (word-sized) encoding rather than emitting a
     separate table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-sized BFD_RELOC_NONE fixup creates the symbol reference
	 without emitting any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Ready for the next .fnstart.  */
  unwind.proc_start = NULL;
}
3525
3526
/* Parse an unwind_cantunwind directive.  Marks the current function as
   one that cannot be unwound through.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* cantunwind is mutually exclusive with a personality routine.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 is the internal marker for "cannot unwind".  */
  unwind.personality_index = -2;
}
3541
3542
/* Parse a personalityindex directive.  Selects one of the predefined
   personality routines by number.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a constant in [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3570
3571
/* Parse a personality directive.  Records a user-supplied personality
   routine symbol for the current function.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_end temporarily NUL-terminates the name in the input
     buffer; look the symbol up (creating it if needed), then restore
     the delimiter character.  */
  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3592
3593
/* Parse a directive saving core registers, e.g. ".save {r4-r7, lr}".
   Emits unwind opcodes that pop the listed registers and accounts for
   the stack space they occupy.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;	/* Bitmask of saved registers: bit N => rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode emitted by .unwind_movsp, and save sp (bit 13)
	 in place of ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3669
3670
/* Parse a directive saving FPA registers.  REG is the first FPA register
   (already parsed by the caller); the remaining ", <constant>" input
   gives the number of consecutive registers saved (1-4).  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers occupy 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
3718
3719
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   operand is a list of D registers; D16-D31 (VFPv3) need a different
   unwind opcode than D0-D15, so a list straddling D16 is split into
   two opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;	/* Number of the first register in the list.  */
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Register numbers are encoded relative to D16 in this opcode.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
3768
3769
/* Parse a directive saving VFP registers for pre-ARMv6, using the
   FSTMX-style encodings (compare s_arm_unwind_save_vfp_armv6 above).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;	/* Number of the first register in the list.  */
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* 8 bytes per D register plus one extra word (the FSTMX-format pad
     word; the ARMv6 variant above adds none).  */
  unwind.frame_size += count * 8 + 4;
}
3803
3804
3805 /* Parse a directive saving iWMMXt data registers. */
3806
3807 static void
3808 s_arm_unwind_save_mmxwr (void)
3809 {
3810 int reg;
3811 int hi_reg;
3812 int i;
3813 unsigned mask = 0;
3814 valueT op;
3815
3816 if (*input_line_pointer == '{')
3817 input_line_pointer++;
3818
3819 do
3820 {
3821 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3822
3823 if (reg == FAIL)
3824 {
3825 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3826 goto error;
3827 }
3828
3829 if (mask >> reg)
3830 as_tsktsk (_("register list not in ascending order"));
3831 mask |= 1 << reg;
3832
3833 if (*input_line_pointer == '-')
3834 {
3835 input_line_pointer++;
3836 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3837 if (hi_reg == FAIL)
3838 {
3839 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3840 goto error;
3841 }
3842 else if (reg >= hi_reg)
3843 {
3844 as_bad (_("bad register range"));
3845 goto error;
3846 }
3847 for (; reg < hi_reg; reg++)
3848 mask |= 1 << reg;
3849 }
3850 }
3851 while (skip_past_comma (&input_line_pointer) != FAIL);
3852
3853 if (*input_line_pointer == '}')
3854 input_line_pointer++;
3855
3856 demand_empty_rest_of_line ();
3857
3858 /* Generate any deferred opcodes because we're going to be looking at
3859 the list. */
3860 flush_pending_unwind ();
3861
3862 for (i = 0; i < 16; i++)
3863 {
3864 if (mask & (1 << i))
3865 unwind.frame_size += 8;
3866 }
3867
3868 /* Attempt to combine with a previous opcode. We do this because gcc
3869 likes to output separate unwind directives for a single block of
3870 registers. */
3871 if (unwind.opcode_count > 0)
3872 {
3873 i = unwind.opcodes[unwind.opcode_count - 1];
3874 if ((i & 0xf8) == 0xc0)
3875 {
3876 i &= 7;
3877 /* Only merge if the blocks are contiguous. */
3878 if (i < 6)
3879 {
3880 if ((mask & 0xfe00) == (1 << 9))
3881 {
3882 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3883 unwind.opcode_count--;
3884 }
3885 }
3886 else if (i == 6 && unwind.opcode_count >= 2)
3887 {
3888 i = unwind.opcodes[unwind.opcode_count - 2];
3889 reg = i >> 4;
3890 i &= 0xf;
3891
3892 op = 0xffff << (reg - 1);
3893 if (reg > 0
3894 && ((mask & op) == (1u << (reg - 1))))
3895 {
3896 op = (1 << (reg + i + 1)) - 1;
3897 op &= ~((1 << reg) - 1);
3898 mask |= op;
3899 unwind.opcode_count -= 2;
3900 }
3901 }
3902 }
3903 }
3904
3905 hi_reg = 15;
3906 /* We want to generate opcodes in the order the registers have been
3907 saved, ie. descending order. */
3908 for (reg = 15; reg >= -1; reg--)
3909 {
3910 /* Save registers in blocks. */
3911 if (reg < 0
3912 || !(mask & (1 << reg)))
3913 {
3914 /* We found an unsaved reg. Generate opcodes to save the
3915 preceding block. */
3916 if (reg != hi_reg)
3917 {
3918 if (reg == 9)
3919 {
3920 /* Short form. */
3921 op = 0xc0 | (hi_reg - 10);
3922 add_unwind_opcode (op, 1);
3923 }
3924 else
3925 {
3926 /* Long form. */
3927 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3928 add_unwind_opcode (op, 2);
3929 }
3930 }
3931 hi_reg = reg - 1;
3932 }
3933 }
3934
3935 return;
3936 error:
3937 ignore_rest_of_line ();
3938 }
3939
3940 static void
3941 s_arm_unwind_save_mmxwcg (void)
3942 {
3943 int reg;
3944 int hi_reg;
3945 unsigned mask = 0;
3946 valueT op;
3947
3948 if (*input_line_pointer == '{')
3949 input_line_pointer++;
3950
3951 do
3952 {
3953 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3954
3955 if (reg == FAIL)
3956 {
3957 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3958 goto error;
3959 }
3960
3961 reg -= 8;
3962 if (mask >> reg)
3963 as_tsktsk (_("register list not in ascending order"));
3964 mask |= 1 << reg;
3965
3966 if (*input_line_pointer == '-')
3967 {
3968 input_line_pointer++;
3969 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3970 if (hi_reg == FAIL)
3971 {
3972 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3973 goto error;
3974 }
3975 else if (reg >= hi_reg)
3976 {
3977 as_bad (_("bad register range"));
3978 goto error;
3979 }
3980 for (; reg < hi_reg; reg++)
3981 mask |= 1 << reg;
3982 }
3983 }
3984 while (skip_past_comma (&input_line_pointer) != FAIL);
3985
3986 if (*input_line_pointer == '}')
3987 input_line_pointer++;
3988
3989 demand_empty_rest_of_line ();
3990
3991 /* Generate any deferred opcodes because we're going to be looking at
3992 the list. */
3993 flush_pending_unwind ();
3994
3995 for (reg = 0; reg < 16; reg++)
3996 {
3997 if (mask & (1 << reg))
3998 unwind.frame_size += 4;
3999 }
4000 op = 0xc700 | mask;
4001 add_unwind_opcode (op, 2);
4002 return;
4003 error:
4004 ignore_rest_of_line ();
4005 }
4006
4007
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.

   Peeks at the first register in the (possibly braced) operand to
   decide which register class is being saved, then delegates to the
   class-specific handler above.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* Consume the register name; the helper parses the count.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:	  s_arm_unwind_save_core (); return;
    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4067
4068
/* Parse an unwind_movsp directive: ".movsp <reg> [, #offset]".
   Declares that REG now holds the stack pointer value, offset by
   OFFSET bytes, and emits the opcode to restore sp from it.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .movsp is only meaningful while sp is still the frame base.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4118
/* Parse an unwind_pad directive: ".pad #offset".  Records extra stack
   space; the actual opcode is deferred via pending_offset so adjacent
   adjustments can be combined before emission.  */

static void
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
{
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (immediate_for_directive (&offset) == FAIL)
    return;

  /* Stack adjustments are always word-multiples.  */
  if (offset & 3)
    {
      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();
      return;
    }

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
}
4145
4146 /* Parse an unwind_setfp directive. */
4147
4148 static void
4149 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4150 {
4151 int sp_reg;
4152 int fp_reg;
4153 int offset;
4154
4155 if (!unwind.proc_start)
4156 as_bad (MISSING_FNSTART);
4157
4158 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4159 if (skip_past_comma (&input_line_pointer) == FAIL)
4160 sp_reg = FAIL;
4161 else
4162 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4163
4164 if (fp_reg == FAIL || sp_reg == FAIL)
4165 {
4166 as_bad (_("expected <reg>, <reg>"));
4167 ignore_rest_of_line ();
4168 return;
4169 }
4170
4171 /* Optional constant. */
4172 if (skip_past_comma (&input_line_pointer) != FAIL)
4173 {
4174 if (immediate_for_directive (&offset) == FAIL)
4175 return;
4176 }
4177 else
4178 offset = 0;
4179
4180 demand_empty_rest_of_line ();
4181
4182 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4183 {
4184 as_bad (_("register must be either sp or set by a previous"
4185 "unwind_movsp directive"));
4186 return;
4187 }
4188
4189 /* Don't generate any opcodes, just record the information for later. */
4190 unwind.fp_reg = fp_reg;
4191 unwind.fp_used = 1;
4192 if (sp_reg == REG_SP)
4193 unwind.fp_offset = unwind.frame_size - offset;
4194 else
4195 unwind.fp_offset -= offset;
4196 }
4197
4198 /* Parse an unwind_raw directive. */
4199
4200 static void
4201 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4202 {
4203 expressionS exp;
4204 /* This is an arbitrary limit. */
4205 unsigned char op[16];
4206 int count;
4207
4208 if (!unwind.proc_start)
4209 as_bad (MISSING_FNSTART);
4210
4211 expression (&exp);
4212 if (exp.X_op == O_constant
4213 && skip_past_comma (&input_line_pointer) != FAIL)
4214 {
4215 unwind.frame_size += exp.X_add_number;
4216 expression (&exp);
4217 }
4218 else
4219 exp.X_op = O_illegal;
4220
4221 if (exp.X_op != O_constant)
4222 {
4223 as_bad (_("expected <offset>, <opcode>"));
4224 ignore_rest_of_line ();
4225 return;
4226 }
4227
4228 count = 0;
4229
4230 /* Parse the opcode. */
4231 for (;;)
4232 {
4233 if (count >= 16)
4234 {
4235 as_bad (_("unwind opcode too long"));
4236 ignore_rest_of_line ();
4237 }
4238 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4239 {
4240 as_bad (_("invalid unwind opcode"));
4241 ignore_rest_of_line ();
4242 return;
4243 }
4244 op[count++] = exp.X_add_number;
4245
4246 /* Parse the next byte. */
4247 if (skip_past_comma (&input_line_pointer) == FAIL)
4248 break;
4249
4250 expression (&exp);
4251 }
4252
4253 /* Add the opcode bytes in reverse order. */
4254 while (count--)
4255 add_unwind_opcode (op[count], 1);
4256
4257 demand_empty_rest_of_line ();
4258 }
4259
4260
4261 /* Parse a .eabi_attribute directive. */
4262
4263 static void
4264 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4265 {
4266 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4267
4268 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4269 attributes_set_explicitly[tag] = 1;
4270 }
4271 #endif /* OBJ_ELF */
4272
4273 static void s_arm_arch (int);
4274 static void s_arm_object_arch (int);
4275 static void s_arm_cpu (int);
4276 static void s_arm_fpu (int);
4277 static void s_arm_arch_extension (int);
4278
4279 #ifdef TE_PE
4280
4281 static void
4282 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4283 {
4284 expressionS exp;
4285
4286 do
4287 {
4288 expression (&exp);
4289 if (exp.X_op == O_symbol)
4290 exp.X_op = O_secrel;
4291
4292 emit_expr (&exp, 4);
4293 }
4294 while (*input_line_pointer++ == ',');
4295
4296 input_line_pointer--;
4297 demand_empty_rest_of_line ();
4298 }
4299 #endif /* TE_PE */
4300
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  /* Instruction-set state selection.  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",	   s_even,	  0 },
  /* Literal pool management.  */
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Architecture / CPU / FPU selection.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* AEABI stack-unwinding directives; see s_arm_unwind_* above.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
4372 \f
4373 /* Parser functions used exclusively in instruction operands. */
4374
4375 /* Generic immediate-value read function for use in insn parsing.
4376 STR points to the beginning of the immediate (the leading #);
4377 VAL receives the value; if the value is outside [MIN, MAX]
4378 issue an error. PREFIX_OPT is true if the immediate prefix is
4379 optional. */
4380
4381 static int
4382 parse_immediate (char **str, int *val, int min, int max,
4383 bfd_boolean prefix_opt)
4384 {
4385 expressionS exp;
4386 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4387 if (exp.X_op != O_constant)
4388 {
4389 inst.error = _("constant expression required");
4390 return FAIL;
4391 }
4392
4393 if (exp.X_add_number < min || exp.X_add_number > max)
4394 {
4395 inst.error = _("immediate value out of range");
4396 return FAIL;
4397 }
4398
4399 *val = exp.X_add_number;
4400 return SUCCESS;
4401 }
4402
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for a 64-bit value, the high 32 bits in .reg with
   .regisimm set.  *STR is advanced past the immediate on success.
   Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4472
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The supported
   constants live in fp_const/fp_values; a match at index I is
   returned as I + 8 (its pseudo-register number).  On success *STR
   is advanced past the constant.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Prefix matched but more input follows: undo and keep
	     trying other formats.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each supported value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4563
/* Returns 1 if IMM, viewed as an IEEE single-precision bit pattern,
   has the "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, else 0.  */

static int
is_quarter_float (unsigned imm)
{
  /* The low 19 bits must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must be either 100000 or 011111, i.e. bit 30 is the
     complement of bit 29 and bits 28..25 all repeat bit 29.  */
  unsigned expect = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expect;
}
4573
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.
   On success, stores the 32-bit single-precision bit pattern in *IMMED,
   advances *CCP past the number, and returns SUCCESS; otherwise FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a float-only character.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns, plus +/-0.0 (loaded as an
	 integer constant by the caller).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4637
/* Shift operands.  */
enum shift_kind
{
  /* RRX takes no operand; the others take a register or immediate.  */
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry associating a shift mnemonic with its kind; used to build the
   arm_shift_hsh hash table consulted by parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
4659
4660 /* Parse a <shift> specifier on an ARM data processing instruction.
4661 This has three forms:
4662
4663 (LSL|LSR|ASL|ASR|ROR) Rs
4664 (LSL|LSR|ASL|ASR|ROR) #imm
4665 RRX
4666
4667 Note that ASL is assimilated to LSL in the instruction encoding, and
4668 RRX to ROR #0 (which cannot be written as such). */
4669
4670 static int
4671 parse_shift (char **str, int i, enum parse_shift_mode mode)
4672 {
4673 const struct asm_shift_name *shift_name;
4674 enum shift_kind shift;
4675 char *s = *str;
4676 char *p = s;
4677 int reg;
4678
4679 for (p = *str; ISALPHA (*p); p++)
4680 ;
4681
4682 if (p == *str)
4683 {
4684 inst.error = _("shift expression expected");
4685 return FAIL;
4686 }
4687
4688 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4689 p - *str);
4690
4691 if (shift_name == NULL)
4692 {
4693 inst.error = _("shift expression expected");
4694 return FAIL;
4695 }
4696
4697 shift = shift_name->kind;
4698
4699 switch (mode)
4700 {
4701 case NO_SHIFT_RESTRICT:
4702 case SHIFT_IMMEDIATE: break;
4703
4704 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4705 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4706 {
4707 inst.error = _("'LSL' or 'ASR' required");
4708 return FAIL;
4709 }
4710 break;
4711
4712 case SHIFT_LSL_IMMEDIATE:
4713 if (shift != SHIFT_LSL)
4714 {
4715 inst.error = _("'LSL' required");
4716 return FAIL;
4717 }
4718 break;
4719
4720 case SHIFT_ASR_IMMEDIATE:
4721 if (shift != SHIFT_ASR)
4722 {
4723 inst.error = _("'ASR' required");
4724 return FAIL;
4725 }
4726 break;
4727
4728 default: abort ();
4729 }
4730
4731 if (shift != SHIFT_RRX)
4732 {
4733 /* Whitespace can appear here if the next thing is a bare digit. */
4734 skip_whitespace (p);
4735
4736 if (mode == NO_SHIFT_RESTRICT
4737 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4738 {
4739 inst.operands[i].imm = reg;
4740 inst.operands[i].immisreg = 1;
4741 }
4742 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4743 return FAIL;
4744 }
4745 inst.operands[i].shift_kind = shift;
4746 inst.operands[i].shifted = 1;
4747 *str = p;
4748 return SUCCESS;
4749 }
4750
4751 /* Parse a <shifter_operand> for an ARM data processing instruction:
4752
4753 #<immediate>
4754 #<immediate>, <rotate>
4755 <Rm>
4756 <Rm>, <shift>
4757
4758 where <shift> is defined by parse_shift above, and <rotate> is a
4759 multiple of 2 between 0 and 30. Validation of immediate operands
4760 is deferred to md_apply_fix. */
4761
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> or <Rm>, <shift>.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: #<immediate> with optional explicit rotation.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* Rotation must be an even amount between 0 and 30, applied to
	 an 8-bit constant, matching the ARM immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): when VALUE is 0 the left shift below is by 32
	 bits, which is well-defined only if X_add_number is wider than
	 32 bits -- confirm the width of the expression field type.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4821
4822 /* Group relocation information. Each entry in the table contains the
4823 textual name of the relocation as may appear in assembler source
4824 and must end with a colon.
4825 Along with this textual name are the relocation codes to be used if
4826 the corresponding instruction is an ALU instruction (ADD or SUB only),
4827 an LDR, an LDRS, or an LDC. */
4828
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in the source.  */
  int alu_code;		/* BFD reloc code for ADD/SUB; 0 if not allowed.  */
  int ldr_code;		/* BFD reloc code for LDR; 0 if not allowed.  */
  int ldrs_code;	/* BFD reloc code for LDRS; 0 if not allowed.  */
  int ldc_code;		/* BFD reloc code for LDC; 0 if not allowed.  */
};
4837
typedef enum
{
  /* Varieties of non-ALU group relocation.  Selects which column of
     group_reloc_table parse_address_main records into inst.reloc.type.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
4846
/* A 0 in a non-ALU column means that relocation variant is invalid for
   that instruction class; parse_address_main rejects it.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
4900
4901 /* Given the address of a pointer pointing to the textual name of a group
4902 relocation as may appear in assembler source, attempt to find its details
4903 in group_reloc_table. The pointer will be updated to the character after
4904 the trailing colon. On failure, FAIL will be returned; SUCCESS
4905 otherwise. On success, *entry will be updated to point at the relevant
4906 group_reloc_table entry. */
4907
4908 static int
4909 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4910 {
4911 unsigned int i;
4912 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4913 {
4914 int length = strlen (group_reloc_table[i].name);
4915
4916 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4917 && (*str)[length] == ':')
4918 {
4919 *out = &group_reloc_table[i];
4920 *str += (length + 1);
4921 return SUCCESS;
4922 }
4923 }
4924
4925 return FAIL;
4926 }
4927
4928 /* Parse a <shifter_operand> for an ARM data processing instruction
4929 (as for parse_shifter_operand) where group relocations are allowed:
4930
4931 #<immediate>
4932 #<immediate>, <rotate>
4933 #:<group_reloc>:<expression>
4934 <Rm>
4935 <Rm>, <shift>
4936
4937 where <group_reloc> is one of the strings defined in group_reloc_table.
4938 The hashes are optional.
4939
4940 Everything else is as for parse_shifter_operand. */
4941
4942 static parse_operand_result
4943 parse_shifter_operand_group_reloc (char **str, int i)
4944 {
4945 /* Determine if we have the sequence of characters #: or just :
4946 coming next. If we do, then we check for a group relocation.
4947 If we don't, punt the whole lot to parse_shifter_operand. */
4948
4949 if (((*str)[0] == '#' && (*str)[1] == ':')
4950 || (*str)[0] == ':')
4951 {
4952 struct group_reloc_table_entry *entry;
4953
4954 if ((*str)[0] == '#')
4955 (*str) += 2;
4956 else
4957 (*str)++;
4958
4959 /* Try to parse a group relocation. Anything else is an error. */
4960 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4961 {
4962 inst.error = _("unknown group relocation");
4963 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4964 }
4965
4966 /* We now have the group relocation table entry corresponding to
4967 the name in the assembler source. Next, we parse the expression. */
4968 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4969 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4970
4971 /* Record the relocation type (always the ALU variant here). */
4972 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
4973 gas_assert (inst.reloc.type != 0);
4974
4975 return PARSE_OPERAND_SUCCESS;
4976 }
4977 else
4978 return parse_shifter_operand (str, i) == SUCCESS
4979 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4980
4981 /* Never reached. */
4982 }
4983
4984 /* Parse a Neon alignment expression. Information is written to
4985 inst.operands[i]. We assume the initial ':' has been skipped.
4986
4987 align .imm = align << 8, .immisalign=1, .preind=0 */
static parse_operand_result
parse_neon_alignment (char **str, int i)
{
  char *p = *str;
  expressionS exp;

  /* NOTE(review): the return value of my_get_expression is ignored
     here; a parse failure is only caught indirectly by the O_constant
     check below -- confirm EXP is always written on failure.  */
  my_get_expression (&exp, &p, GE_NO_PREFIX);

  if (exp.X_op != O_constant)
    {
      inst.error = _("alignment must be constant");
      return PARSE_OPERAND_FAIL;
    }

  /* Store the alignment shifted into bits 8 and up, per the contract
     documented above.  */
  inst.operands[i].imm = exp.X_add_number << 8;
  inst.operands[i].immisalign = 1;
  /* Alignments are not pre-indexes.  */
  inst.operands[i].preind = 0;

  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5010
5011 /* Parse all forms of an ARM address expression. Information is written
5012 to inst.operands[i] and/or inst.reloc.
5013
5014 Preindexed addressing (.preind=1):
5015
5016 [Rn, #offset] .reg=Rn .reloc.exp=offset
5017 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5018 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5019 .shift_kind=shift .reloc.exp=shift_imm
5020
5021 These three may have a trailing ! which causes .writeback to be set also.
5022
5023 Postindexed addressing (.postind=1, .writeback=1):
5024
5025 [Rn], #offset .reg=Rn .reloc.exp=offset
5026 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5027 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5028 .shift_kind=shift .reloc.exp=shift_imm
5029
5030 Unindexed addressing (.preind=0, .postind=0):
5031
5032 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5033
5034 Other:
5035
5036 [Rn]{!} shorthand for [Rn,#0]{!}
5037 =immediate .isreg=0 .reloc.exp=immediate
5038 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5039
5040 It is the caller's responsibility to check for addressing modes not
5041 supported by the instruction, and to set inst.reloc.type. */
5042
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either a bare label (PC-relative) or an "=imm"
     load-constant pseudo operand.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: the base register comes first.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : the pre-indexed forms.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm ... : register offset, with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* A '-' was consumed above but the offset turned out to be
		 an expression, not a register: back up so the sign is
		 re-parsed as part of the expression.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation. Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this relocation variant
		 does not exist for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], ... : the post-indexed forms, always with writeback.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      /* Expression offset: undo any '-' consumed above so the
		 sign is parsed with the expression.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5259
5260 static int
5261 parse_address (char **str, int i)
5262 {
5263 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5264 ? SUCCESS : FAIL;
5265 }
5266
/* As parse_address, but group relocations of the given TYPE are
   accepted and the full parse_operand_result is propagated.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5272
5273 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* NOTE(review): assumes inst.reloc.type is BFD_RELOC_UNUSED on
     entry; the prefix tests below rely on that -- confirm callers.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* A :lower16:/:upper16: prefix was recognized; step over it.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation prefix: the operand must be a constant fitting
	 the 16-bit immediate field.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5312
5313 /* Miscellaneous. */
5314
5315 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5316 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0
	   || (strncasecmp (p, "APSR", 4) == 0
	       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m)))
    psr_field = 0;
  else
    {
      /* Not [CS]PSR: fall back to the v7-M special-register names in
	 arm_v7m_psr_hsh.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						  p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* A bare [CS]PSR implies the 'c' and 'f' flag fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5382
5383 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5384 value suitable for splatting into the AIF field of the instruction. */
5385
5386 static int
5387 parse_cps_flags (char **str)
5388 {
5389 int val = 0;
5390 int saw_a_flag = 0;
5391 char *s = *str;
5392
5393 for (;;)
5394 switch (*s++)
5395 {
5396 case '\0': case ',':
5397 goto done;
5398
5399 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5400 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5401 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5402
5403 default:
5404 inst.error = _("unrecognized CPS flag");
5405 return FAIL;
5406 }
5407
5408 done:
5409 if (saw_a_flag == 0)
5410 {
5411 inst.error = _("missing CPS flags");
5412 return FAIL;
5413 }
5414
5415 *str = s - 1;
5416 return val;
5417 }
5418
5419 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5420 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5421
5422 static int
5423 parse_endian_specifier (char **str)
5424 {
5425 int little_endian;
5426 char *s = *str;
5427
5428 if (strncasecmp (s, "BE", 2))
5429 little_endian = 0;
5430 else if (strncasecmp (s, "LE", 2))
5431 little_endian = 1;
5432 else
5433 {
5434 inst.error = _("valid endian specifiers are be or le");
5435 return FAIL;
5436 }
5437
5438 if (ISALNUM (s[2]) || s[2] == '_')
5439 {
5440 inst.error = _("valid endian specifiers are be or le");
5441 return FAIL;
5442 }
5443
5444 *str = s + 2;
5445 return little_endian;
5446 }
5447
5448 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5449 value suitable for poking into the rotate field of an sxt or sxta
5450 instruction, or FAIL on error. */
5451
5452 static int
5453 parse_ror (char **str)
5454 {
5455 int rot;
5456 char *s = *str;
5457
5458 if (strncasecmp (s, "ROR", 3) == 0)
5459 s += 3;
5460 else
5461 {
5462 inst.error = _("missing rotation field after comma");
5463 return FAIL;
5464 }
5465
5466 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5467 return FAIL;
5468
5469 switch (rot)
5470 {
5471 case 0: *str = s; return 0x0;
5472 case 8: *str = s; return 0x1;
5473 case 16: *str = s; return 0x2;
5474 case 24: *str = s; return 0x3;
5475
5476 default:
5477 inst.error = _("rotation can only be 0, 8, 16, or 24");
5478 return FAIL;
5479 }
5480 }
5481
5482 /* Parse a conditional code (from conds[] below). The value returned is in the
5483 range 0 .. 14, or FAIL. */
5484 static int
5485 parse_cond (char **str)
5486 {
5487 char *q;
5488 const struct asm_cond *c;
5489 int n;
5490 /* Condition codes are always 2 characters, so matching up to
5491 3 characters is sufficient. */
5492 char cond[3];
5493
5494 q = *str;
5495 n = 0;
5496 while (ISALPHA (*q) && n < 3)
5497 {
5498 cond[n] = TOLOWER (*q);
5499 q++;
5500 n++;
5501 }
5502
5503 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5504 if (!c)
5505 {
5506 inst.error = _("condition required");
5507 return FAIL;
5508 }
5509
5510 *str = q;
5511 return c->value;
5512 }
5513
5514 /* Parse an option for a barrier instruction. Returns the encoding for the
5515 option, or FAIL. */
5516 static int
5517 parse_barrier (char **str)
5518 {
5519 char *p, *q;
5520 const struct asm_barrier_opt *o;
5521
5522 p = q = *str;
5523 while (ISALPHA (*q))
5524 q++;
5525
5526 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5527 q - p);
5528 if (!o)
5529 return FAIL;
5530
5531 *str = q;
5532 return o->value;
5533 }
5534
5535 /* Parse the operands of a table branch instruction. Similar to a memory
5536 operand. */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Base register.  */
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* Index register, recorded in the imm slot.  */
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* The only shift accepted here is LSL #1.  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
5589
5590 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5591 information on the types the operands can take and how they are encoded.
5592 Up to four operands may be read; this function handles setting the
5593 ".present" field for each read operand itself.
5594 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5595 else returns FAIL. */
5596
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The three top-level alternatives are distinguished by the first
     operand: a scalar, a typed S/D/Q vector register, or an ARM core
     register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a D destination needs a second core register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two more core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5811
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd): the ARM code occupies the low
   16 bits and the Thumb code the high 16 bits of the result.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
5816
/* Matcher codes for parse_operands.

   Codes whose names begin with OP_o denote optional operands.  They
   must all be grouped together at the end of the enum, after
   OP_FIRST_OPTIONAL, because parse_operands identifies optional
   operands with a single ">= OP_FIRST_OPTIONAL" test.  Values built
   with MIX_ARM_THUMB_OPERANDS pack two codes into one 32-bit value
   (ARM low, Thumb high) and are >= 1<<16.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* "Either/or" codes: a register of the named class, or the
     alternative named in the suffix.  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  All codes from here on must stay after
     OP_FIRST_OPTIONAL (see below).  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Boundary marker: everything at or above this value is optional.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
5945
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   STR is the operand string; PATTERN is the OP_stop-terminated array
   of operand_parse_code values describing the expected grammar;
   THUMB selects the Thumb half of any mixed ARM/Thumb codes.

   Optional operands (>= OP_FIRST_OPTIONAL) are handled by recording a
   backtrack point before parsing them; if a later operand then fails
   to parse, we rewind, mark the optional operand as absent, and retry
   from there.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;	/* Non-null while an optional operand
				   may still be reconsidered.  */
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;

  /* Helper macros, local to this function (undefined again below).
     Each either stores the parsed value into inst.operands[i] or
     transfers control to a label on failure.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)	\
  do				\
    {				\
      if (expr)			\
	goto failure;		\
    }				\
  while (0)

/* Like po_misc_or_fail, but EXPR yields a parse_operand_result and a
   "no backtrack" failure also cancels any pending optional-operand
   backtracking (so the real error is reported, not a retry).  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

/* Parse a barrier option name; on failure fall back to the
   "immediate:" label below for a numeric barrier option.  */
#define po_barrier_or_imm(str)				\
  do							\
    {							\
      val = parse_barrier (&str);			\
      if (val == FAIL)					\
	{						\
	  if (ISALPHA (*str))				\
	    goto failure;				\
	  else						\
	    goto immediate;				\
	}						\
      else						\
	{						\
	  if ((inst.instruction & 0xf0) == 0x60		\
	      && val != 0xf)				\
	    {						\
	      /* ISB can only take SY as an option.  */	\
	      inst.error = _("invalid barrier type");	\
	      goto failure;				\
	    }						\
	}						\
    }							\
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Unpack mixed ARM/Thumb codes (see MIX_ARM_THUMB_OPERANDS).  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* Operands are comma-separated; the first comma is omitted when
	 operand 0 was absorbed elsewhere (e.g. by parse_neon_mov).  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar.  Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  /* The try_imm0 label above is an ordinary label inside the
	     switch, so jumping to it from here is well-defined.  */
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the parser consumed up to the zapped '!', step over it.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc.  These all leave their result in VAL; it is range- and
	     failure-checked in the second switch below.  */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag letter; any repeated
		 or unknown letter forces it to 16, which can never
		 equal the required value 15 (all four flags once).  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): operand index 1 is hard-coded here rather
		 than using i -- presumably because instructions taking
		 a '^' suffix (LDM/STM) always have the register list as
		 operand 1; confirm before reusing this pattern.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP)
		inst.error = BAD_SP;
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	  /* These codes left their result in VAL above; store it now
	     (or fail) so a single code path handles them all.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;
      /* Fall through to the generic failure/backtrack handling.  */

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
6607
/* The po_* helper macros are local to parse_operands; scrub all of
   them so they cannot leak into later code.  (po_scalar_or_goto was
   previously left defined because the list mistakenly undefined a
   non-existent "po_scalar_or_fail".)  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
6614
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR as the instruction error and return
   from the *enclosing* function -- usable only in functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
6626
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   If REG is r13 or r15, set inst.error and return from the enclosing
   (void) function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
6638
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only issued when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_warn (_("use of r13 is deprecated"));	\
  while (0)
6646
6647 /* Functions for operand encoding. ARM, then Thumb. */
6648
/* Rotate the 32-bit value V left by N bits, for N in [0, 31].
   Masking both shift counts keeps the N == 0 case well-defined: a
   plain "v >> (32 - n)" would shift by the full width of the type,
   which is undefined behaviour in C.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
6650
6651 /* If VAL can be encoded in the immediate field of an ARM instruction,
6652 return the encoded form. Otherwise, return FAIL. */
6653
6654 static unsigned int
6655 encode_arm_immediate (unsigned int val)
6656 {
6657 unsigned int a, i;
6658
6659 for (i = 0; i < 32; i += 2)
6660 if ((a = rotate_left (val, i)) <= 0xff)
6661 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6662
6663 return FAIL;
6664 }
6665
6666 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6667 return the encoded form. Otherwise, return FAIL. */
6668 static unsigned int
6669 encode_thumb32_immediate (unsigned int val)
6670 {
6671 unsigned int a, i;
6672
6673 if (val <= 0xff)
6674 return val;
6675
6676 for (i = 1; i <= 24; i++)
6677 {
6678 a = val >> i;
6679 if ((val & ~(0xff << i)) == 0)
6680 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6681 }
6682
6683 a = val & 0xff;
6684 if (val == ((a << 16) | a))
6685 return 0x100 | a;
6686 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6687 return 0x300 | a;
6688
6689 a = val & 0xff00;
6690 if (val == ((a << 16) | a))
6691 return 0x200 | (a >> 8);
6692
6693 return FAIL;
6694 }
/* Encode a VFP SP or DP register number into inst.instruction.  POS
   selects which instruction field (Sd/Sn/Sm or Dd/Dn/Dm) receives the
   register.  D registers 16-31 require the D32 extension: using one
   records that feature as used for the current instruction set, or
   raises an error if the selected VFP version lacks it.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split as 4-bit field + low bit; D registers split as
     4-bit field + high bit, each pair placed per the field POS.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
6749
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    /* RRX is encoded as ROR with an implicit zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  /* Shift amount comes from a register.  */
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: leave a reloc for md_apply_fix.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
6769
6770 static void
6771 encode_arm_shifter_operand (int i)
6772 {
6773 if (inst.operands[i].isreg)
6774 {
6775 inst.instruction |= inst.operands[i].reg;
6776 encode_arm_shift (i);
6777 }
6778 else
6779 inst.instruction |= INST_IMMEDIATE;
6780 }
6781
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register plus the pre/post-index and write-back
   bits common to both addressing modes, and warns when the transfer
   register overlaps a write-back base.  IS_T is true for the
   user-mode (T-suffixed) load/store variants, which accept only
   post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* Operand I is the base register, set up by parse_address.  */
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the write-back base (bits 16-19) equals the transfer
     register (bits 12-15); the result is unpredictable.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6820
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_warn (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
6875
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || inst.operands[i].reg == REG_PC),
		  BAD_PC_ADDRESSING);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Mark the offset as an immediate (split 8-bit) rather than a
	 register.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
6911
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS, or FAIL with inst.error set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the immediate field carries a coprocessor
	 option, and the U bit is always set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations (and LDR_PC_G0) are preserved; anything
	 else becomes the default coprocessor-offset relocation for
	 the current instruction set.  */
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6972
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Locate the load bit for the relevant encoding so we can verify
     that this really is a load and not a store.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      /* "=expr" makes no sense on a store.  */
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  /* 8-bit constants can use the Thumb-1 mov(1) form; unified
	     syntax leaves the choice to the normal encoders.  */
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  /* Try the constant, then its complement, as an ARM rotated
	     8-bit immediate.  */
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* No move was possible: place the value in the literal pool and
     rewrite operand 1 as a pc-relative pool reference.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7054
7055 /* Functions for instruction encoding, sorted by sub-architecture.
7056 First some generics; their names are taken from the conventional
7057 bit positions for register arguments in ARM format instructions. */
7058
/* Encoder for instructions with no operands: the opcode-table bits
   are already complete, so there is nothing to do.  */
static void
do_noargs (void)
{
}
7063
7064 static void
7065 do_rd (void)
7066 {
7067 inst.instruction |= inst.operands[0].reg << 12;
7068 }
7069
7070 static void
7071 do_rd_rm (void)
7072 {
7073 inst.instruction |= inst.operands[0].reg << 12;
7074 inst.instruction |= inst.operands[1].reg;
7075 }
7076
7077 static void
7078 do_rd_rn (void)
7079 {
7080 inst.instruction |= inst.operands[0].reg << 12;
7081 inst.instruction |= inst.operands[1].reg << 16;
7082 }
7083
7084 static void
7085 do_rn_rd (void)
7086 {
7087 inst.instruction |= inst.operands[0].reg << 16;
7088 inst.instruction |= inst.operands[1].reg << 12;
7089 }
7090
7091 static void
7092 do_rd_rm_rn (void)
7093 {
7094 unsigned Rn = inst.operands[2].reg;
7095 /* Enforce restrictions on SWP instruction. */
7096 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7097 {
7098 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7099 _("Rn must not overlap other operands"));
7100
7101 /* SWP{b} is deprecated for ARMv6* and ARMv7. */
7102 if (warn_on_deprecated
7103 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7104 as_warn (_("swp{b} use is deprecated for this architecture"));
7105
7106 }
7107 inst.instruction |= inst.operands[0].reg << 12;
7108 inst.instruction |= inst.operands[1].reg;
7109 inst.instruction |= Rn << 16;
7110 }
7111
7112 static void
7113 do_rd_rn_rm (void)
7114 {
7115 inst.instruction |= inst.operands[0].reg << 12;
7116 inst.instruction |= inst.operands[1].reg << 16;
7117 inst.instruction |= inst.operands[2].reg;
7118 }
7119
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16) for instructions written
   "op Rm, Rd, [Rn]".  Any immediate offset parsed with the address
   must be zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Only a literal zero offset (or no offset expression at all,
     which parses as O_illegal) is acceptable.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7132
7133 static void
7134 do_imm0 (void)
7135 {
7136 inst.instruction |= inst.operands[0].imm;
7137 }
7138
7139 static void
7140 do_rd_cpaddr (void)
7141 {
7142 inst.instruction |= inst.operands[0].reg << 12;
7143 encode_arm_cp_address (1, TRUE, TRUE, 0);
7144 }
7145
7146 /* ARM instructions, in alphabetical order by function name (except
7147 that wrapper functions appear immediately after the function they
7148 wrap). */
7149
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* In ARM state the PC reads as the instruction address plus 8.  */
  inst.reloc.exp.X_add_number -= 8;
}
7164
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Two ADD instructions are emitted; reserve space for both.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state the PC reads as the instruction address plus 8.  */
  inst.reloc.exp.X_add_number -= 8;
}
7182
/* Data-processing arithmetic: "op Rd, Rn, <shifter>".  The two-operand
   form "op Rd, <shifter>" is shorthand with Rn == Rd.  */
static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  encode_arm_shifter_operand (2);
}
7192
7193 static void
7194 do_barrier (void)
7195 {
7196 if (inst.operands[0].present)
7197 {
7198 constraint ((inst.instruction & 0xf0) != 0x40
7199 && inst.operands[0].imm > 0xf
7200 && inst.operands[0].imm < 0x0,
7201 _("bad barrier type"));
7202 inst.instruction |= inst.operands[0].imm;
7203 }
7204 else
7205 inst.instruction |= 0xf;
7206 }
7207
7208 static void
7209 do_bfc (void)
7210 {
7211 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7212 constraint (msb > 32, _("bit-field extends past end of register"));
7213 /* The instruction encoding stores the LSB and MSB,
7214 not the LSB and width. */
7215 inst.instruction |= inst.operands[0].reg << 12;
7216 inst.instruction |= inst.operands[1].imm << 7;
7217 inst.instruction |= (msb - 1) << 16;
7218 }
7219
7220 static void
7221 do_bfi (void)
7222 {
7223 unsigned int msb;
7224
7225 /* #0 in second position is alternative syntax for bfc, which is
7226 the same instruction but with REG_PC in the Rm field. */
7227 if (!inst.operands[1].isreg)
7228 inst.operands[1].reg = REG_PC;
7229
7230 msb = inst.operands[2].imm + inst.operands[3].imm;
7231 constraint (msb > 32, _("bit-field extends past end of register"));
7232 /* The instruction encoding stores the LSB and MSB,
7233 not the LSB and width. */
7234 inst.instruction |= inst.operands[0].reg << 12;
7235 inst.instruction |= inst.operands[1].reg;
7236 inst.instruction |= inst.operands[2].imm << 7;
7237 inst.instruction |= (msb - 1) << 16;
7238 }
7239
7240 static void
7241 do_bfx (void)
7242 {
7243 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7244 _("bit-field extends past end of register"));
7245 inst.instruction |= inst.operands[0].reg << 12;
7246 inst.instruction |= inst.operands[1].reg;
7247 inst.instruction |= inst.operands[2].imm << 7;
7248 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7249 }
7250
7251 /* ARM V5 breakpoint instruction (argument parse)
7252 BKPT <16 bit unsigned immediate>
7253 Instruction is not conditional.
7254 The bit pattern given in insns[] has the COND_ALWAYS condition,
7255 and it is an error if the caller tried to override that. */
7256
7257 static void
7258 do_bkpt (void)
7259 {
7260 /* Top 12 of 16 bits to bits 19:8. */
7261 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7262
7263 /* Bottom 4 of 16 bits to bits 3:0. */
7264 inst.instruction |= inst.operands[0].imm & 0xf;
7265 }
7266
/* Set up inst.reloc for a branch target: either the "(plt)" suffix
   form, which forces BFD_RELOC_ARM_PLT32, or the caller-supplied
   default relocation.  The relocation is always pc-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
    }
  inst.reloc.pc_rel = 1;
}
7282
/* Unconditional/conditional branch (B).  EABI v4 and later objects use
   the JUMP relocation so the linker can interpose veneers; older
   objects use the plain pc-relative branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7293
/* Branch with link (BL).  Under EABI v4+ an unconditional BL gets the
   CALL relocation (the linker may convert it to BLX); a conditional
   BL cannot be converted, so it gets the JUMP relocation instead.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7309
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
7341
/* BX Rm.  May emit an R_ARM_V4BX relocation so linkers targeting
   pre-v5 cores can rewrite the instruction.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Note: for non-ELF targets the #ifdef drops the condition, so the
     following assignment unconditionally suppresses the reloc.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
7365
7366
7367 /* ARM v5TEJ. Jump to Jazelle code. */
7368
7369 static void
7370 do_bxj (void)
7371 {
7372 if (inst.operands[0].reg == REG_PC)
7373 as_tsktsk (_("use of r15 in bxj is not really useful"));
7374
7375 inst.instruction |= inst.operands[0].reg;
7376 }
7377
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc, bits 11:8.  */
  inst.instruction |= inst.operands[1].imm << 20;	/* opcode_1, bits 23:20.  */
  inst.instruction |= inst.operands[2].reg << 12;	/* CRd, bits 15:12.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn, bits 19:16.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm, bits 3:0.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2, bits 7:5.  */
}
7391
7392 static void
7393 do_cmp (void)
7394 {
7395 inst.instruction |= inst.operands[0].reg << 16;
7396 encode_arm_shifter_operand (1);
7397 }
7398
/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

static void
do_co_reg (void)
{
  unsigned Rd;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2: only SP is rejected here; Rd == PC is permitted
	   (presumably the APSR_nzcv form — confirm with callers).  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }


  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 21;	/* opcode_1.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
7438
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

   MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      /* Thumb-2 rejects SP as well as PC (see reject_bad_reg).  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
}
7477
/* CPS<effect> <iflags>{, #<mode>}: the interrupt-flag mask goes in
   bits 8:6; an explicit mode adds the M-mod bit and the mode number
   in the low bits.  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
7488
7489 static void
7490 do_dbg (void)
7491 {
7492 inst.instruction |= inst.operands[0].imm;
7493 }
7494
7495 static void
7496 do_div (void)
7497 {
7498 unsigned Rd, Rn, Rm;
7499
7500 Rd = inst.operands[0].reg;
7501 Rn = (inst.operands[1].present
7502 ? inst.operands[1].reg : Rd);
7503 Rm = inst.operands[2].reg;
7504
7505 constraint ((Rd == REG_PC), BAD_PC);
7506 constraint ((Rn == REG_PC), BAD_PC);
7507 constraint ((Rm == REG_PC), BAD_PC);
7508
7509 inst.instruction |= Rd << 16;
7510 inst.instruction |= Rn << 0;
7511 inst.instruction |= Rm << 8;
7512 }
7513
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* The instruction emits no bytes in ARM state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the condition mask so following instructions can be
	 checked against the IT block state.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
7530
/* LDM/STM (all variants): encode base register and register list and
   diagnose writeback combinations the architecture leaves
   UNPREDICTABLE.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register-list bit mask.  */

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback on the list operand presumably marks the "^" suffix
     (user-bank / exception-return form) — confirm with the parser.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
7569
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14/r15 would be the pair — r15 is not a valid destination.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to Rd+1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
7612
7613 static void
7614 do_ldrex (void)
7615 {
7616 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7617 || inst.operands[1].postind || inst.operands[1].writeback
7618 || inst.operands[1].immisreg || inst.operands[1].shifted
7619 || inst.operands[1].negative
7620 /* This can arise if the programmer has written
7621 strex rN, rM, foo
7622 or if they have mistakenly used a register name as the last
7623 operand, eg:
7624 strex rN, rM, rX
7625 It is very difficult to distinguish between these two cases
7626 because "rX" might actually be a label. ie the register
7627 name has been occluded by a symbol of the same name. So we
7628 just generate a general 'bad addressing mode' type error
7629 message and leave it up to the programmer to discover the
7630 true cause and fix their mistake. */
7631 || (inst.operands[1].reg == REG_PC),
7632 BAD_ADDR_MODE);
7633
7634 constraint (inst.reloc.exp.X_op != O_constant
7635 || inst.reloc.exp.X_add_number != 0,
7636 _("offset must be zero in ARM encoding"));
7637
7638 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
7639
7640 inst.instruction |= inst.operands[0].reg << 12;
7641 inst.instruction |= inst.operands[1].reg << 16;
7642 inst.reloc.type = BFD_RELOC_UNUSED;
7643 }
7644
/* LDREXD Rt, Rt2, [Rn]: Rt must be even and not r14; Rt2, if given,
   must be Rt+1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
}
7660
/* Word/byte load or store (LDR/STR family).  "ldr Rd, =expr" is
   converted to a mov/mvn or a literal-pool load by
   move_or_literal_pool, which returns TRUE when no address encoding
   remains to be done.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}
7670
/* LDRT/STRT: unprivileged load/store, mode 2 addressing.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only acceptable when it is really just
	 "[Rn]" with a zero offset.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
7689
7690 /* Halfword and signed-byte load/store operations. */
7691
/* ARMv4 halfword/signed-byte load/store (mode 3 addressing).  The PC
   is not a valid transfer register.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldrh Rd, =expr" goes via the literal pool / mov conversion.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
7702
/* LDRHT/LDRSBT/etc.: unprivileged halfword/signed-byte load/store,
   mode 3 addressing.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only acceptable when it is really just
	 "[Rn]" with a zero offset.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
7721
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* CP#, bits 11:8.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd, bits 15:12.  */
  /* Writeback and unindexed addressing are both permitted.  */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7731
/* MLA/MLAS/MLS: Rd in 19:16, Rm in 3:0, Rs in 11:8, Rn in 15:12.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).
     Bit 22 (0x00400000) distinguishes the mls encoding here.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */
  inst.instruction |= inst.operands[3].reg << 12;	/* Rn.  */
}
7746
7747 static void
7748 do_mov (void)
7749 {
7750 inst.instruction |= inst.operands[0].reg << 12;
7751 encode_arm_shifter_operand (1);
7752 }
7753
7754 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7755 static void
7756 do_mov16 (void)
7757 {
7758 bfd_vma imm;
7759 bfd_boolean top;
7760
7761 top = (inst.instruction & 0x00400000) != 0;
7762 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7763 _(":lower16: not allowed this instruction"));
7764 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7765 _(":upper16: not allowed instruction"));
7766 inst.instruction |= inst.operands[0].reg << 12;
7767 if (inst.reloc.type == BFD_RELOC_UNUSED)
7768 {
7769 imm = inst.reloc.exp.X_add_number;
7770 /* The value is in two pieces: 0:11, 16:19. */
7771 inst.instruction |= (imm & 0x00000fff);
7772 inst.instruction |= (imm & 0x0000f000) << 4;
7773 }
7774 }
7775
7776 static void do_vfp_nsyn_opcode (const char *);
7777
/* Handle the VFP-register forms of MRS.  Returns SUCCESS if a VFP
   encoding (fmstat or fmrx) was selected and emitted, FAIL if the
   caller should encode a core-register mrs instead.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: reg 1 is the FPSCR source.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear them before re-encoding.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
7796
7797 static int
7798 do_vfp_nsyn_msr (void)
7799 {
7800 if (inst.operands[0].isvec)
7801 do_vfp_nsyn_opcode ("fmxr");
7802 else
7803 return FAIL;
7804
7805 return SUCCESS;
7806 }
7807
/* VMRS Rt, FPSCR: Rt in bits 15:12.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid destination in Thumb state.  */
  if (thumb_mode && inst.operands[0].reg == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* Register number 1 denotes FPSCR in this operand position.  */
  if (inst.operands[1].reg != 1)
    first_error (_("operand 1 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7831
/* VMSR FPSCR, Rt: Rt in bits 15:12.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* Register number 1 denotes FPSCR in this operand position.  */
  if (inst.operands[0].reg != 1)
    first_error (_("operand 0 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7850
/* MRS Rd, CPSR/SPSR.  The VFP forms (fmstat/fmrx) take precedence
   when the operands select them.  */
static void
do_mrs (void)
{
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}
7865
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* The VFP form (fmxr) takes precedence when applicable.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;	/* PSR field mask.  */
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: value fixed up later as an ARM rotated
	 immediate.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
7886
/* MUL Rd, Rm{, Rs} (Rs defaults to Rd): Rd in 19:16, Rm in 3:0,
   Rs in 11:8.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* This restriction was lifted in ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
7902
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
7927
/* NOP{<cond>} {#<hint>}.  With a hint operand, or on v6K and later,
   emit the architectural hint encoding; otherwise the opcode-table
   value is used unchanged.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
7941
7942 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7943 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7944 Condition defaults to COND_ALWAYS.
7945 Error if Rd, Rn or Rm are R15. */
7946
7947 static void
7948 do_pkhbt (void)
7949 {
7950 inst.instruction |= inst.operands[0].reg << 12;
7951 inst.instruction |= inst.operands[1].reg << 16;
7952 inst.instruction |= inst.operands[2].reg;
7953 if (inst.operands[3].present)
7954 encode_arm_shift (3);
7955 }
7956
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped Rn/Rm fields.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
7979
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

    PLD(W) <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  Only plain pre-indexed
  addressing is accepted.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
8000
/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear; undo the bit set by the
     mode-2 address encoder.  */
  inst.instruction &= ~PRE_INDEX;
}
8016
8017 static void
8018 do_push_pop (void)
8019 {
8020 inst.operands[1] = inst.operands[0];
8021 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8022 inst.operands[0].isreg = 1;
8023 inst.operands[0].writeback = 1;
8024 inst.operands[0].reg = REG_SP;
8025 do_ldmstm ();
8026 }
8027
8028 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8029 word at the specified address and the following word
8030 respectively.
8031 Unconditionally executed.
8032 Error if Rn is R15. */
8033
8034 static void
8035 do_rfe (void)
8036 {
8037 inst.instruction |= inst.operands[0].reg << 16;
8038 if (inst.operands[0].writeback)
8039 inst.instruction |= WRITE_BACK;
8040 }
8041
8042 /* ARM V6 ssat (argument parse). */
8043
8044 static void
8045 do_ssat (void)
8046 {
8047 inst.instruction |= inst.operands[0].reg << 12;
8048 inst.instruction |= (inst.operands[1].imm - 1) << 16;
8049 inst.instruction |= inst.operands[2].reg;
8050
8051 if (inst.operands[3].present)
8052 encode_arm_shift (3);
8053 }
8054
8055 /* ARM V6 usat (argument parse). */
8056
8057 static void
8058 do_usat (void)
8059 {
8060 inst.instruction |= inst.operands[0].reg << 12;
8061 inst.instruction |= inst.operands[1].imm << 16;
8062 inst.instruction |= inst.operands[2].reg;
8063
8064 if (inst.operands[3].present)
8065 encode_arm_shift (3);
8066 }
8067
8068 /* ARM V6 ssat16 (argument parse). */
8069
8070 static void
8071 do_ssat16 (void)
8072 {
8073 inst.instruction |= inst.operands[0].reg << 12;
8074 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8075 inst.instruction |= inst.operands[2].reg;
8076 }
8077
8078 static void
8079 do_usat16 (void)
8080 {
8081 inst.instruction |= inst.operands[0].reg << 12;
8082 inst.instruction |= inst.operands[1].imm << 16;
8083 inst.instruction |= inst.operands[2].reg;
8084 }
8085
8086 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8087 preserving the other bits.
8088
8089 setend <endian_specifier>, where <endian_specifier> is either
8090 BE or LE. */
8091
8092 static void
8093 do_setend (void)
8094 {
8095 if (inst.operands[0].imm)
8096 inst.instruction |= 0x200;
8097 }
8098
8099 static void
8100 do_shift (void)
8101 {
8102 unsigned int Rm = (inst.operands[1].present
8103 ? inst.operands[1].reg
8104 : inst.operands[0].reg);
8105
8106 inst.instruction |= inst.operands[0].reg << 12;
8107 inst.instruction |= Rm;
8108 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
8109 {
8110 inst.instruction |= inst.operands[2].reg << 8;
8111 inst.instruction |= SHIFT_BY_REG;
8112 }
8113 else
8114 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8115 }
8116
8117 static void
8118 do_smc (void)
8119 {
8120 inst.reloc.type = BFD_RELOC_ARM_SMC;
8121 inst.reloc.pc_rel = 0;
8122 }
8123
8124 static void
8125 do_swi (void)
8126 {
8127 inst.reloc.type = BFD_RELOC_ARM_SWI;
8128 inst.reloc.pc_rel = 0;
8129 }
8130
8131 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8132 SMLAxy{cond} Rd,Rm,Rs,Rn
8133 SMLAWy{cond} Rd,Rm,Rs,Rn
8134 Error if any register is R15. */
8135
8136 static void
8137 do_smla (void)
8138 {
8139 inst.instruction |= inst.operands[0].reg << 16;
8140 inst.instruction |= inst.operands[1].reg;
8141 inst.instruction |= inst.operands[2].reg << 8;
8142 inst.instruction |= inst.operands[3].reg << 12;
8143 }
8144
8145 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8146 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8147 Error if any register is R15.
8148 Warning if Rdlo == Rdhi. */
8149
8150 static void
8151 do_smlal (void)
8152 {
8153 inst.instruction |= inst.operands[0].reg << 12;
8154 inst.instruction |= inst.operands[1].reg << 16;
8155 inst.instruction |= inst.operands[2].reg;
8156 inst.instruction |= inst.operands[3].reg << 8;
8157
8158 if (inst.operands[0].reg == inst.operands[1].reg)
8159 as_tsktsk (_("rdhi and rdlo must be different"));
8160 }
8161
8162 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8163 SMULxy{cond} Rd,Rm,Rs
8164 Error if any register is R15. */
8165
8166 static void
8167 do_smul (void)
8168 {
8169 inst.instruction |= inst.operands[0].reg << 16;
8170 inst.instruction |= inst.operands[1].reg;
8171 inst.instruction |= inst.operands[2].reg << 8;
8172 }
8173
8174 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8175 the same for both ARM and Thumb-2. */
8176
8177 static void
8178 do_srs (void)
8179 {
8180 int reg;
8181
8182 if (inst.operands[0].present)
8183 {
8184 reg = inst.operands[0].reg;
8185 constraint (reg != REG_SP, _("SRS base register must be r13"));
8186 }
8187 else
8188 reg = REG_SP;
8189
8190 inst.instruction |= reg << 16;
8191 inst.instruction |= inst.operands[1].imm;
8192 if (inst.operands[0].writeback || inst.operands[1].writeback)
8193 inst.instruction |= WRITE_BACK;
8194 }
8195
8196 /* ARM V6 strex (argument parse). */
8197
static void
do_strex (void)
{
  /* The address operand must be a bare [Rn]: no post-indexing,
     writeback, register offset, shift or negative index, and
     Rn must not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status result Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* The ARM encoding has no offset field.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}

static void
do_strexd (void)
{
  /* STREXD stores an even/odd register pair; operand 2, if given,
     must be the register immediately after operand 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status result must not overlap the stored pair or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
8243
8244 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8245 extends it to 32-bits, and adds the result to a value in another
8246 register. You can specify a rotation by 0, 8, 16, or 24 bits
8247 before extracting the 16-bit value.
8248 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8249 Condition defaults to COND_ALWAYS.
8250 Error if any register uses R15. */
8251
8252 static void
8253 do_sxtah (void)
8254 {
8255 inst.instruction |= inst.operands[0].reg << 12;
8256 inst.instruction |= inst.operands[1].reg << 16;
8257 inst.instruction |= inst.operands[2].reg;
8258 inst.instruction |= inst.operands[3].imm << 10;
8259 }
8260
8261 /* ARM V6 SXTH.
8262
8263 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8264 Condition defaults to COND_ALWAYS.
8265 Error if any register uses R15. */
8266
8267 static void
8268 do_sxth (void)
8269 {
8270 inst.instruction |= inst.operands[0].reg << 12;
8271 inst.instruction |= inst.operands[1].reg;
8272 inst.instruction |= inst.operands[2].imm << 10;
8273 }
8274 \f
8275 /* VFP instructions. In a logical order: SP variant first, monad
8276 before dyad, arithmetic then move then load/store. */
8277
static void
do_vfp_sp_monadic (void)
{
  /* One-source SP operation: encode Sd and Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  /* Two-source SP operation: encode Sd, Sn and Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_compare_z (void)
{
  /* Compare against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

static void
do_vfp_dp_sp_cvt (void)
{
  /* Conversion with a double destination and single source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  /* Conversion with a single destination and double source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8312
static void
do_vfp_reg_from_sp (void)
{
  /* Core register destination in bits 12-15; source is Sn.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  /* Two SP registers to a core register pair (bits 12-15, 16-19).  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  /* Destination is Sn; core source register in bits 12-15.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_vfp_sp2_from_reg2 (void)
{
  /* Core register pair (bits 12-15, 16-19) to two SP registers.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8346
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8360
8361
8362 static void
8363 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8364 {
8365 if (inst.operands[0].writeback)
8366 inst.instruction |= WRITE_BACK;
8367 else
8368 constraint (ldstm_type != VFP_LDSTMIA,
8369 _("this addressing mode requires base-register writeback"));
8370 inst.instruction |= inst.operands[0].reg << 16;
8371 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8372 inst.instruction |= inst.operands[1].imm;
8373 }
8374
8375 static void
8376 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8377 {
8378 int count;
8379
8380 if (inst.operands[0].writeback)
8381 inst.instruction |= WRITE_BACK;
8382 else
8383 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8384 _("this addressing mode requires base-register writeback"));
8385
8386 inst.instruction |= inst.operands[0].reg << 16;
8387 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8388
8389 count = inst.operands[1].imm << 1;
8390 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8391 count += 1;
8392
8393 inst.instruction |= count;
8394 }
8395
static void
do_vfp_sp_ldstmia (void)
{
  /* Single-precision, increment-after.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  /* Single-precision, decrement-before.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  /* Double-precision, increment-after.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  /* Double-precision, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  /* Extra-word (X) form, increment-after.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  /* Extra-word (X) form, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
8431
static void
do_vfp_dp_rd_rm (void)
{
  /* Operands 0 and 1 are Dd and Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  /* Operands 0 and 1 are Dn and Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  /* Operands 0 and 1 are Dd and Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Operands 0-2 are Dd, Dn and Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  /* Only the destination Dd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Operands 0-2 are Dm, Dd and Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
8474
8475 /* VFPv3 instructions. */
8476 static void
8477 do_vfp_sp_const (void)
8478 {
8479 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8480 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8481 inst.instruction |= (inst.operands[1].imm & 0x0f);
8482 }
8483
8484 static void
8485 do_vfp_dp_const (void)
8486 {
8487 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8488 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8489 inst.instruction |= (inst.operands[1].imm & 0x0f);
8490 }
8491
8492 static void
8493 vfp_conv (int srcsize)
8494 {
8495 unsigned immbits = srcsize - inst.operands[1].imm;
8496 inst.instruction |= (immbits & 1) << 5;
8497 inst.instruction |= (immbits >> 1);
8498 }
8499
static void
do_vfp_sp_conv_16 (void)
{
  /* Single-precision register, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  /* Double-precision register, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  /* Single-precision register, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  /* Double-precision register, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
8527 \f
8528 /* FPA instructions. Also in a logical order. */
8529
8530 static void
8531 do_fpa_cmp (void)
8532 {
8533 inst.instruction |= inst.operands[0].reg << 16;
8534 inst.instruction |= inst.operands[1].reg;
8535 }
8536
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count (1-4) is encoded in the CP_T_X/CP_T_Y bits;
     a count of 4 is represented by both bits clear.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesise an offset of 12 bytes per register transferred.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending variants walk downward: negate the offset.  */
      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Post-increment with writeback is re-expressed as a
	 post-indexed operand.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8575 \f
8576 /* iWMMXt instructions: strictly in alphabetical order. */
8577
8578 static void
8579 do_iwmmxt_tandorc (void)
8580 {
8581 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8582 }
8583
8584 static void
8585 do_iwmmxt_textrc (void)
8586 {
8587 inst.instruction |= inst.operands[0].reg << 12;
8588 inst.instruction |= inst.operands[1].imm;
8589 }
8590
8591 static void
8592 do_iwmmxt_textrm (void)
8593 {
8594 inst.instruction |= inst.operands[0].reg << 12;
8595 inst.instruction |= inst.operands[1].reg << 16;
8596 inst.instruction |= inst.operands[2].imm;
8597 }
8598
8599 static void
8600 do_iwmmxt_tinsr (void)
8601 {
8602 inst.instruction |= inst.operands[0].reg << 16;
8603 inst.instruction |= inst.operands[1].reg << 12;
8604 inst.instruction |= inst.operands[2].imm;
8605 }
8606
8607 static void
8608 do_iwmmxt_tmia (void)
8609 {
8610 inst.instruction |= inst.operands[0].reg << 5;
8611 inst.instruction |= inst.operands[1].reg;
8612 inst.instruction |= inst.operands[2].reg << 12;
8613 }
8614
8615 static void
8616 do_iwmmxt_waligni (void)
8617 {
8618 inst.instruction |= inst.operands[0].reg << 12;
8619 inst.instruction |= inst.operands[1].reg << 16;
8620 inst.instruction |= inst.operands[2].reg;
8621 inst.instruction |= inst.operands[3].imm << 20;
8622 }
8623
8624 static void
8625 do_iwmmxt_wmerge (void)
8626 {
8627 inst.instruction |= inst.operands[0].reg << 12;
8628 inst.instruction |= inst.operands[1].reg << 16;
8629 inst.instruction |= inst.operands[2].reg;
8630 inst.instruction |= inst.operands[3].imm << 21;
8631 }
8632
8633 static void
8634 do_iwmmxt_wmov (void)
8635 {
8636 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8637 inst.instruction |= inst.operands[0].reg << 12;
8638 inst.instruction |= inst.operands[1].reg << 16;
8639 inst.instruction |= inst.operands[1].reg;
8640 }
8641
8642 static void
8643 do_iwmmxt_wldstbh (void)
8644 {
8645 int reloc;
8646 inst.instruction |= inst.operands[0].reg << 12;
8647 if (thumb_mode)
8648 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8649 else
8650 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8651 encode_arm_cp_address (1, TRUE, FALSE, reloc);
8652 }
8653
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers use the unconditional (0xf)
	 encoding, so reject any explicit condition.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8667
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset form; when a register index was
     parsed, rewrite the coprocessor encoding into it.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;	/* Drop the CP addressing bits.  */
      inst.instruction |= (0xf << 28);	/* Unconditional encoding.  */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;	/* Base register.  */
      /* Constant part of the offset at bits 4+, index register in
	 bits 0-3.  */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
8690
8691 static void
8692 do_iwmmxt_wshufh (void)
8693 {
8694 inst.instruction |= inst.operands[0].reg << 12;
8695 inst.instruction |= inst.operands[1].reg << 16;
8696 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8697 inst.instruction |= (inst.operands[2].imm & 0x0f);
8698 }
8699
8700 static void
8701 do_iwmmxt_wzero (void)
8702 {
8703 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8704 inst.instruction |= inst.operands[0].reg;
8705 inst.instruction |= inst.operands[0].reg << 12;
8706 inst.instruction |= inst.operands[0].reg << 16;
8707 }
8708
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    /* The immediate-shift variants exist only on iWMMXt2.  */
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A zero shift count is not directly encodable; rewrite the
	   opcode field (bits 20-23) into an equivalent instruction.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; immediate bit 4 goes to insn bit 8.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
8758 \f
8759 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8760 operations first, then control, shift, and load/store. */
8761
8762 /* Insns like "foo X,Y,Z". */
8763
8764 static void
8765 do_mav_triple (void)
8766 {
8767 inst.instruction |= inst.operands[0].reg << 16;
8768 inst.instruction |= inst.operands[1].reg;
8769 inst.instruction |= inst.operands[2].reg << 12;
8770 }
8771
8772 /* Insns like "foo W,X,Y,Z".
8773 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8774
8775 static void
8776 do_mav_quad (void)
8777 {
8778 inst.instruction |= inst.operands[0].reg << 5;
8779 inst.instruction |= inst.operands[1].reg << 12;
8780 inst.instruction |= inst.operands[2].reg << 16;
8781 inst.instruction |= inst.operands[3].reg;
8782 }
8783
8784 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8785 static void
8786 do_mav_dspsc (void)
8787 {
8788 inst.instruction |= inst.operands[1].reg << 12;
8789 }
8790
8791 /* Maverick shift immediate instructions.
8792 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8793 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8794
8795 static void
8796 do_mav_shift (void)
8797 {
8798 int imm = inst.operands[2].imm;
8799
8800 inst.instruction |= inst.operands[0].reg << 12;
8801 inst.instruction |= inst.operands[1].reg << 16;
8802
8803 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8804 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8805 Bit 4 should be 0. */
8806 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8807
8808 inst.instruction |= imm;
8809 }
8810 \f
8811 /* XScale instructions. Also sorted arithmetic before move. */
8812
8813 /* Xscale multiply-accumulate (argument parse)
8814 MIAcc acc0,Rm,Rs
8815 MIAPHcc acc0,Rm,Rs
8816 MIAxycc acc0,Rm,Rs. */
8817
8818 static void
8819 do_xsc_mia (void)
8820 {
8821 inst.instruction |= inst.operands[1].reg;
8822 inst.instruction |= inst.operands[2].reg << 12;
8823 }
8824
8825 /* Xscale move-accumulator-register (argument parse)
8826
8827 MARcc acc0,RdLo,RdHi. */
8828
8829 static void
8830 do_xsc_mar (void)
8831 {
8832 inst.instruction |= inst.operands[1].reg << 12;
8833 inst.instruction |= inst.operands[2].reg << 16;
8834 }
8835
8836 /* Xscale move-register-accumulator (argument parse)
8837
8838 MRAcc RdLo,RdHi,acc0. */
8839
8840 static void
8841 do_xsc_mra (void)
8842 {
8843 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8844 inst.instruction |= inst.operands[0].reg << 12;
8845 inst.instruction |= inst.operands[1].reg << 16;
8846 }
8847 \f
8848 /* Encoding functions relevant only to Thumb. */
8849
8850 /* inst.operands[i] is a shifted-register operand; encode
8851 it into inst.instruction in the format used by Thumb32. */
8852
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Normalise: a zero amount becomes LSL #0, while LSR/ASR #32
	 are encoded with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The amount is split into imm3 (insn bits 12-14) and imm2
	 (insn bits 6-7).  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8884
8885
8886 /* inst.operands[i] was set up by parse_address. Encode it into a
8887 Thumb32 format load or store instruction. Reject forms that cannot
8888 be used with such instructions. If is_t is true, reject forms that
8889 cannot be used with a T instruction; if is_d is true, reject forms
8890 that cannot be used with a D instruction. If it is a store insn,
8891 reject PC in Rn. */
8892
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #n}]: register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* The LSL amount must be a constant in the range 0-3.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!}: pre-indexed, possibly with writeback.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
		  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm: post-indexed; writeback is implied by the parse.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8964
8965 /* Table of Thumb instructions which exist in both 16- and 32-bit
8966 encodings (the latter only in post-V6T2 cores). The index is the
8967 value used in the insns table below. When there is more than one
8968 possible 16-bit encoding for the instruction, this table always
8969 holds variant (1).
8970 Also contains several pseudo-instructions used during relaxation. */
/* Each X entry pairs a mnemonic suffix with its 16-bit opcode
   template and its 32-bit opcode template; the macro X is redefined
   below to extract each column in turn.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                    \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode templates, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcode templates, indexed the same way; in the 32-bit
   templates bit 20 (0x00100000) is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
9068
9069 /* Thumb instruction encoders, in alphabetical order. */
9070
9071 /* ADDW or SUBW. */
9072
9073 static void
9074 do_t_add_sub_w (void)
9075 {
9076 int Rd, Rn;
9077
9078 Rd = inst.operands[0].reg;
9079 Rn = inst.operands[1].reg;
9080
9081 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9082 is the SP-{plus,minus}-immediate form of the instruction. */
9083 if (Rn == REG_SP)
9084 constraint (Rd == REG_PC, BAD_PC);
9085 else
9086 reject_bad_reg (Rd);
9087
9088 inst.instruction |= (Rn << 16) | (Rd << 8);
9089 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9090 }
9091
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* Writing PC is a branch: must be the last instruction of an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* The 16-bit encodings set flags iff outside an IT block, so a
	 narrow form is only usable when that matches the requested
	 flag-setting behaviour.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* If no explicit size was requested, let relaxation
		     widen this to 32 bits later if needed.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  /* Fall through to a 32-bit encoding when one was explicitly
	     requested, or when no narrow form applied.  */
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     may write PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  /* Adjust the opcode bits to the modified-immediate form
		     of the data-processing encoding.  */
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so Rn holds the non-destination
			 source for the high-register ADD encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the 16-bit forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
9295
9296 static void
9297 do_t_adr (void)
9298 {
9299 unsigned Rd;
9300
9301 Rd = inst.operands[0].reg;
9302 reject_bad_reg (Rd);
9303
9304 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9305 {
9306 /* Defer to section relaxation. */
9307 inst.relax = inst.instruction;
9308 inst.instruction = THUMB_OP16 (inst.instruction);
9309 inst.instruction |= Rd << 4;
9310 }
9311 else if (unified_syntax && inst.size_req != 2)
9312 {
9313 /* Generate a 32-bit opcode. */
9314 inst.instruction = THUMB_OP32 (inst.instruction);
9315 inst.instruction |= Rd << 8;
9316 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9317 inst.reloc.pc_rel = 1;
9318 }
9319 else
9320 {
9321 /* Generate a 16-bit opcode. */
9322 inst.instruction = THUMB_OP16 (inst.instruction);
9323 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9324 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
9325 inst.reloc.pc_rel = 1;
9326
9327 inst.instruction |= Rd << 4;
9328 }
9329 }
9330
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Adjust the opcode bits to the modified-immediate form of
	     the data-processing encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The narrow
	     form sets flags iff outside an IT block, so it must match the
	     requested flag-setting behaviour.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
9419
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Adjust the opcode bits to the modified-immediate form of
	     the data-processing encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The narrow
	     form sets flags iff outside an IT block, so it must match the
	     requested flag-setting behaviour.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: either source may coincide with the
		 destination for the 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
9520
9521 static void
9522 do_t_barrier (void)
9523 {
9524 if (inst.operands[0].present)
9525 {
9526 constraint ((inst.instruction & 0xf0) != 0x40
9527 && inst.operands[0].imm > 0xf
9528 && inst.operands[0].imm < 0x0,
9529 _("bad barrier type"));
9530 inst.instruction |= inst.operands[0].imm;
9531 }
9532 else
9533 inst.instruction |= 0xf;
9534 }
9535
9536 static void
9537 do_t_bfc (void)
9538 {
9539 unsigned Rd;
9540 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9541 constraint (msb > 32, _("bit-field extends past end of register"));
9542 /* The instruction encoding stores the LSB and MSB,
9543 not the LSB and width. */
9544 Rd = inst.operands[0].reg;
9545 reject_bad_reg (Rd);
9546 inst.instruction |= Rd << 8;
9547 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9548 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9549 inst.instruction |= msb - 1;
9550 }
9551
9552 static void
9553 do_t_bfi (void)
9554 {
9555 int Rd, Rn;
9556 unsigned int msb;
9557
9558 Rd = inst.operands[0].reg;
9559 reject_bad_reg (Rd);
9560
9561 /* #0 in second position is alternative syntax for bfc, which is
9562 the same instruction but with REG_PC in the Rm field. */
9563 if (!inst.operands[1].isreg)
9564 Rn = REG_PC;
9565 else
9566 {
9567 Rn = inst.operands[1].reg;
9568 reject_bad_reg (Rn);
9569 }
9570
9571 msb = inst.operands[2].imm + inst.operands[3].imm;
9572 constraint (msb > 32, _("bit-field extends past end of register"));
9573 /* The instruction encoding stores the LSB and MSB,
9574 not the LSB and width. */
9575 inst.instruction |= Rd << 8;
9576 inst.instruction |= Rn << 16;
9577 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9578 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9579 inst.instruction |= msb - 1;
9580 }
9581
9582 static void
9583 do_t_bfx (void)
9584 {
9585 unsigned Rd, Rn;
9586
9587 Rd = inst.operands[0].reg;
9588 Rn = inst.operands[1].reg;
9589
9590 reject_bad_reg (Rd);
9591 reject_bad_reg (Rn);
9592
9593 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9594 _("bit-field extends past end of register"));
9595 inst.instruction |= Rd << 8;
9596 inst.instruction |= Rn << 16;
9597 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9598 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9599 inst.instruction |= inst.operands[3].imm - 1;
9600 }
9601
9602 /* ARM V5 Thumb BLX (argument parse)
9603 BLX <target_addr> which is BLX(1)
9604 BLX <Rm> which is BLX(2)
9605 Unfortunately, there are two different opcodes for this mnemonic.
9606 So, the insns[].value is not used, and the code here zaps values
9607 into inst.instruction.
9608
9609 ??? How to take advantage of the additional two bits of displacement
9610 available in Thumb32 mode? Need new relocation? */
9611
9612 static void
9613 do_t_blx (void)
9614 {
9615 set_it_insn_type_last ();
9616
9617 if (inst.operands[0].isreg)
9618 {
9619 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9620 /* We have a register, so this is BLX(2). */
9621 inst.instruction |= inst.operands[0].reg << 3;
9622 }
9623 else
9624 {
9625 /* No register. This must be BLX(1). */
9626 inst.instruction = 0xf000e800;
9627 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
9628 inst.reloc.pc_rel = 1;
9629 }
9630 }
9631
/* Encode a Thumb conditional or unconditional branch, choosing between
   the 16- and 32-bit encodings and the matching PC-relative reloc.  */

static void
do_t_branch (void)
{
  int opcode;
  int cond;

  /* NOTE(review): this initial assignment is dead -- both arms of the
     if/else below overwrite 'cond'.  */
  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* 0xF is not a valid condition for the conditional-branch
	     encoding (it selects other instructions).  */
	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
9684
9685 static void
9686 do_t_bkpt (void)
9687 {
9688 constraint (inst.cond != COND_ALWAYS,
9689 _("instruction is always unconditional"));
9690 if (inst.operands[0].present)
9691 {
9692 constraint (inst.operands[0].imm > 255,
9693 _("immediate value out of range"));
9694 inst.instruction |= inst.operands[0].imm;
9695 set_it_insn_type (NEUTRAL_IT_INSN);
9696 }
9697 }
9698
/* Encode a BL-style branch-and-link: emits the 23-bit PC-relative reloc
   and must be the last instruction of any enclosing IT block.  */

static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
9719
9720 static void
9721 do_t_bx (void)
9722 {
9723 set_it_insn_type_last ();
9724 inst.instruction |= inst.operands[0].reg << 3;
9725 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9726 should cause the alignment to be checked once it is known. This is
9727 because BX PC only works if the instruction is word aligned. */
9728 }
9729
9730 static void
9731 do_t_bxj (void)
9732 {
9733 int Rm;
9734
9735 set_it_insn_type_last ();
9736 Rm = inst.operands[0].reg;
9737 reject_bad_reg (Rm);
9738 inst.instruction |= Rm << 16;
9739 }
9740
9741 static void
9742 do_t_clz (void)
9743 {
9744 unsigned Rd;
9745 unsigned Rm;
9746
9747 Rd = inst.operands[0].reg;
9748 Rm = inst.operands[1].reg;
9749
9750 reject_bad_reg (Rd);
9751 reject_bad_reg (Rm);
9752
9753 inst.instruction |= Rd << 8;
9754 inst.instruction |= Rm << 16;
9755 inst.instruction |= Rm;
9756 }
9757
9758 static void
9759 do_t_cps (void)
9760 {
9761 set_it_insn_type (OUTSIDE_IT_INSN);
9762 inst.instruction |= inst.operands[0].imm;
9763 }
9764
/* CPS<effect> <iflags>{, #<mode>} -- change processor state, interrupt
   flags form.  Not permitted inside an IT block.  */

static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: rebuild the opcode from scratch, carrying over the
	 imod bits from the table encoding and adding iflags and the
	 optional mode operand.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): gating the 'A' flag on arm_ext_v1 looks odd --
	 nearly every target has v1, so this constraint can hardly ever
	 fire; confirm the intended feature bit.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
9792
9793 /* THUMB CPY instruction (argument parse). */
9794
9795 static void
9796 do_t_cpy (void)
9797 {
9798 if (inst.size_req == 4)
9799 {
9800 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9801 inst.instruction |= inst.operands[0].reg << 8;
9802 inst.instruction |= inst.operands[1].reg;
9803 }
9804 else
9805 {
9806 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9807 inst.instruction |= (inst.operands[0].reg & 0x7);
9808 inst.instruction |= inst.operands[1].reg << 3;
9809 }
9810 }
9811
9812 static void
9813 do_t_cbz (void)
9814 {
9815 set_it_insn_type (OUTSIDE_IT_INSN);
9816 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9817 inst.instruction |= inst.operands[0].reg;
9818 inst.reloc.pc_rel = 1;
9819 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9820 }
9821
9822 static void
9823 do_t_dbg (void)
9824 {
9825 inst.instruction |= inst.operands[0].imm;
9826 }
9827
9828 static void
9829 do_t_div (void)
9830 {
9831 unsigned Rd, Rn, Rm;
9832
9833 Rd = inst.operands[0].reg;
9834 Rn = (inst.operands[1].present
9835 ? inst.operands[1].reg : Rd);
9836 Rm = inst.operands[2].reg;
9837
9838 reject_bad_reg (Rd);
9839 reject_bad_reg (Rn);
9840 reject_bad_reg (Rm);
9841
9842 inst.instruction |= Rd << 8;
9843 inst.instruction |= Rn << 16;
9844 inst.instruction |= Rm;
9845 }
9846
9847 static void
9848 do_t_hint (void)
9849 {
9850 if (unified_syntax && inst.size_req == 4)
9851 inst.instruction = THUMB_OP32 (inst.instruction);
9852 else
9853 inst.instruction = THUMB_OP16 (inst.instruction);
9854 }
9855
/* IT (If-Then) -- encode the base condition and then/else mask and prime
   the assembler's IT-block tracking state.  */

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Remember the raw mask (with a guard bit at 0x10) and the base
     condition so the following instructions can be checked.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;

  /* If the condition is a negative condition, invert the mask.
     The parser built the mask assuming an odd (positive) base condition;
     for an even condition each then/else bit above the terminating 1-bit
     must be flipped.  The XOR constant (0x8/0xC/0xE) flips one, two or
     three bits according to where the lowest set bit sits.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
9885
/* Helper function used for both push/pop and ldm/stm.  BASE is the base
   register number, MASK the register-list bitmap, WRITEBACK whether the
   base is updated.  Errors are reported through inst.error.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the wide opcode is the load/store bit.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch: must be last in any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Place the sole register number in bits 12-15 (the Rt field of
	 the str/ldr encoding).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
9949
/* Encode Thumb LDM/STM, narrowing to the 16-bit LDMIA/STMIA, PUSH/POP,
   or single-register LDR/STR forms where possible.  */

static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit forms require writeback for STMIA, and for
		 LDMIA writeback exactly when the base is not loaded.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms narrow to PUSH/POP or SP-relative
		 STR/LDR.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only 16-bit LDMIA/STMIA with writeback
	 semantics exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
10077
10078 static void
10079 do_t_ldrex (void)
10080 {
10081 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10082 || inst.operands[1].postind || inst.operands[1].writeback
10083 || inst.operands[1].immisreg || inst.operands[1].shifted
10084 || inst.operands[1].negative,
10085 BAD_ADDR_MODE);
10086
10087 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10088
10089 inst.instruction |= inst.operands[0].reg << 12;
10090 inst.instruction |= inst.operands[1].reg << 16;
10091 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10092 }
10093
10094 static void
10095 do_t_ldrexd (void)
10096 {
10097 if (!inst.operands[1].present)
10098 {
10099 constraint (inst.operands[0].reg == REG_LR,
10100 _("r14 not allowed as first register "
10101 "when second register is omitted"));
10102 inst.operands[1].reg = inst.operands[0].reg + 1;
10103 }
10104 constraint (inst.operands[0].reg == inst.operands[1].reg,
10105 BAD_OVERLAP);
10106
10107 inst.instruction |= inst.operands[0].reg << 12;
10108 inst.instruction |= inst.operands[1].reg << 8;
10109 inst.instruction |= inst.operands[2].reg << 16;
10110 }
10111
10112 static void
10113 do_t_ldst (void)
10114 {
10115 unsigned long opcode;
10116 int Rn;
10117
10118 if (inst.operands[0].isreg
10119 && !inst.operands[0].preind
10120 && inst.operands[0].reg == REG_PC)
10121 set_it_insn_type_last ();
10122
10123 opcode = inst.instruction;
10124 if (unified_syntax)
10125 {
10126 if (!inst.operands[1].isreg)
10127 {
10128 if (opcode <= 0xffff)
10129 inst.instruction = THUMB_OP32 (opcode);
10130 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10131 return;
10132 }
10133 if (inst.operands[1].isreg
10134 && !inst.operands[1].writeback
10135 && !inst.operands[1].shifted && !inst.operands[1].postind
10136 && !inst.operands[1].negative && inst.operands[0].reg <= 7
10137 && opcode <= 0xffff
10138 && inst.size_req != 4)
10139 {
10140 /* Insn may have a 16-bit form. */
10141 Rn = inst.operands[1].reg;
10142 if (inst.operands[1].immisreg)
10143 {
10144 inst.instruction = THUMB_OP16 (opcode);
10145 /* [Rn, Rik] */
10146 if (Rn <= 7 && inst.operands[1].imm <= 7)
10147 goto op16;
10148 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10149 reject_bad_reg (inst.operands[1].imm);
10150 }
10151 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10152 && opcode != T_MNEM_ldrsb)
10153 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10154 || (Rn == REG_SP && opcode == T_MNEM_str))
10155 {
10156 /* [Rn, #const] */
10157 if (Rn > 7)
10158 {
10159 if (Rn == REG_PC)
10160 {
10161 if (inst.reloc.pc_rel)
10162 opcode = T_MNEM_ldr_pc2;
10163 else
10164 opcode = T_MNEM_ldr_pc;
10165 }
10166 else
10167 {
10168 if (opcode == T_MNEM_ldr)
10169 opcode = T_MNEM_ldr_sp;
10170 else
10171 opcode = T_MNEM_str_sp;
10172 }
10173 inst.instruction = inst.operands[0].reg << 8;
10174 }
10175 else
10176 {
10177 inst.instruction = inst.operands[0].reg;
10178 inst.instruction |= inst.operands[1].reg << 3;
10179 }
10180 inst.instruction |= THUMB_OP16 (opcode);
10181 if (inst.size_req == 2)
10182 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10183 else
10184 inst.relax = opcode;
10185 return;
10186 }
10187 }
10188 /* Definitely a 32-bit variant. */
10189
10190 /* Do some validations regarding addressing modes. */
10191 if (inst.operands[1].immisreg && opcode != T_MNEM_ldr
10192 && opcode != T_MNEM_str)
10193 reject_bad_reg (inst.operands[1].imm);
10194
10195 inst.instruction = THUMB_OP32 (opcode);
10196 inst.instruction |= inst.operands[0].reg << 12;
10197 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10198 return;
10199 }
10200
10201 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10202
10203 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10204 {
10205 /* Only [Rn,Rm] is acceptable. */
10206 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10207 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10208 || inst.operands[1].postind || inst.operands[1].shifted
10209 || inst.operands[1].negative,
10210 _("Thumb does not support this addressing mode"));
10211 inst.instruction = THUMB_OP16 (inst.instruction);
10212 goto op16;
10213 }
10214
10215 inst.instruction = THUMB_OP16 (inst.instruction);
10216 if (!inst.operands[1].isreg)
10217 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10218 return;
10219
10220 constraint (!inst.operands[1].preind
10221 || inst.operands[1].shifted
10222 || inst.operands[1].writeback,
10223 _("Thumb does not support this addressing mode"));
10224 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10225 {
10226 constraint (inst.instruction & 0x0600,
10227 _("byte or halfword not valid for base register"));
10228 constraint (inst.operands[1].reg == REG_PC
10229 && !(inst.instruction & THUMB_LOAD_BIT),
10230 _("r15 based store not allowed"));
10231 constraint (inst.operands[1].immisreg,
10232 _("invalid base register for register offset"));
10233
10234 if (inst.operands[1].reg == REG_PC)
10235 inst.instruction = T_OPCODE_LDR_PC;
10236 else if (inst.instruction & THUMB_LOAD_BIT)
10237 inst.instruction = T_OPCODE_LDR_SP;
10238 else
10239 inst.instruction = T_OPCODE_STR_SP;
10240
10241 inst.instruction |= inst.operands[0].reg << 8;
10242 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10243 return;
10244 }
10245
10246 constraint (inst.operands[1].reg > 7, BAD_HIREG);
10247 if (!inst.operands[1].immisreg)
10248 {
10249 /* Immediate offset. */
10250 inst.instruction |= inst.operands[0].reg;
10251 inst.instruction |= inst.operands[1].reg << 3;
10252 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10253 return;
10254 }
10255
10256 /* Register offset. */
10257 constraint (inst.operands[1].imm > 7, BAD_HIREG);
10258 constraint (inst.operands[1].negative,
10259 _("Thumb does not support this addressing mode"));
10260
10261 op16:
10262 switch (inst.instruction)
10263 {
10264 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10265 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10266 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10267 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10268 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10269 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10270 case 0x5600 /* ldrsb */:
10271 case 0x5e00 /* ldrsh */: break;
10272 default: abort ();
10273 }
10274
10275 inst.instruction |= inst.operands[0].reg;
10276 inst.instruction |= inst.operands[1].reg << 3;
10277 inst.instruction |= inst.operands[1].imm << 6;
10278 }
10279
10280 static void
10281 do_t_ldstd (void)
10282 {
10283 if (!inst.operands[1].present)
10284 {
10285 inst.operands[1].reg = inst.operands[0].reg + 1;
10286 constraint (inst.operands[0].reg == REG_LR,
10287 _("r14 not allowed here"));
10288 }
10289 inst.instruction |= inst.operands[0].reg << 12;
10290 inst.instruction |= inst.operands[1].reg << 8;
10291 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10292 }
10293
10294 static void
10295 do_t_ldstt (void)
10296 {
10297 inst.instruction |= inst.operands[0].reg << 12;
10298 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10299 }
10300
10301 static void
10302 do_t_mla (void)
10303 {
10304 unsigned Rd, Rn, Rm, Ra;
10305
10306 Rd = inst.operands[0].reg;
10307 Rn = inst.operands[1].reg;
10308 Rm = inst.operands[2].reg;
10309 Ra = inst.operands[3].reg;
10310
10311 reject_bad_reg (Rd);
10312 reject_bad_reg (Rn);
10313 reject_bad_reg (Rm);
10314 reject_bad_reg (Ra);
10315
10316 inst.instruction |= Rd << 8;
10317 inst.instruction |= Rn << 16;
10318 inst.instruction |= Rm;
10319 inst.instruction |= Ra << 12;
10320 }
10321
10322 static void
10323 do_t_mlal (void)
10324 {
10325 unsigned RdLo, RdHi, Rn, Rm;
10326
10327 RdLo = inst.operands[0].reg;
10328 RdHi = inst.operands[1].reg;
10329 Rn = inst.operands[2].reg;
10330 Rm = inst.operands[3].reg;
10331
10332 reject_bad_reg (RdLo);
10333 reject_bad_reg (RdHi);
10334 reject_bad_reg (Rn);
10335 reject_bad_reg (Rm);
10336
10337 inst.instruction |= RdLo << 12;
10338 inst.instruction |= RdHi << 8;
10339 inst.instruction |= Rn << 16;
10340 inst.instruction |= Rm;
10341 }
10342
/* Encode Thumb MOV, MOVS and CMP with a register or immediate second
   operand.  In unified syntax this chooses between the 16-bit and
   32-bit encodings, honouring any .n/.w size suffix and the IT-block
   context, and rewrites some forms onto other opcodes (MOVS PC,LR
   becomes SUBS PC,LR,#0; register-shifted MOV becomes a shift
   instruction).  In divided syntax only the classic 16-bit
   encodings are available.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* An instruction writing PC must be the last one in an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV-type opcodes put the destination in bits 8-11; CMP puts
	 its first operand in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_warn (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      /* 16-bit MOV/CMP with 8-bit immediate; leave a relax
		 opcode behind when no explicit .n was given so the
		 insn can still grow to 32 bits later.  */
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit form with a T32 modified immediate.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		/* No 16-bit immediate ROR; fall back to wide.  */
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, register operand, not a register-shifted form.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
10620
10621 static void
10622 do_t_mov16 (void)
10623 {
10624 unsigned Rd;
10625 bfd_vma imm;
10626 bfd_boolean top;
10627
10628 top = (inst.instruction & 0x00800000) != 0;
10629 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
10630 {
10631 constraint (top, _(":lower16: not allowed this instruction"));
10632 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
10633 }
10634 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
10635 {
10636 constraint (!top, _(":upper16: not allowed this instruction"));
10637 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
10638 }
10639
10640 Rd = inst.operands[0].reg;
10641 reject_bad_reg (Rd);
10642
10643 inst.instruction |= Rd << 8;
10644 if (inst.reloc.type == BFD_RELOC_UNUSED)
10645 {
10646 imm = inst.reloc.exp.X_add_number;
10647 inst.instruction |= (imm & 0xf000) << 4;
10648 inst.instruction |= (imm & 0x0800) << 15;
10649 inst.instruction |= (imm & 0x0700) << 4;
10650 inst.instruction |= (imm & 0x00ff);
10651 }
10652 }
10653
/* Encode MVN/MVNS/TST/TEQ/CMP/CMN with a register or immediate
   operand.  MVN-type opcodes put their destination in bits 8-11,
   the comparisons put their first operand in bits 16-19 (r0off
   selects which).  Opcode values above 0xffff are already concrete
   32-bit encodings and are left alone.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Decide whether a 16-bit encoding is possible: no .w suffix,
	 a generic (mnemonic) opcode, no shift, and low registers.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* Register-shifted register forms have no MVN/TST-style
		 encoding; only constant shifts are accepted.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: only the 16-bit unshifted-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
10732
/* Encode Thumb MRS.  Operand 1 is the PSR specifier: with none of
   the field/SPSR bits set it is treated as an M-profile special
   register (arm_ext_m required); otherwise only plain CPSR/SPSR
   (the c+f field combination) is accepted.  */
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  /* Let any VFP system-register form handle this mnemonic first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      /* No PSR field bits: an M-profile special register.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  /* SPSR_BIT selects the SPSR variant (R bit of the encoding).  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
10766
/* Encode Thumb MSR.  The Thumb encoding only takes a register
   source.  Operand 0 is the PSR/field mask: bits above the low byte
   indicate a classic CPSR/SPSR field mask (any ARM); an 8-bit value
   names an M-profile special register, for which the f field is
   forced on.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Let any VFP system-register form handle this mnemonic first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* Classic CPSR/SPSR field-mask destination.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* M-profile special register; always written in full.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* SPSR_BIT selects the SPSR variant (R bit of the encoding).  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
10801
/* Encode Thumb MUL/MULS.  With two operands, Rm defaults to Rd.
   The 16-bit encoding requires the destination to overlap one of
   the sources and both sources to be low registers; in divided
   syntax MULS cannot be encoded at all.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* MUL Rd, Rn is shorthand for MUL Rd, Rn, Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Narrow only when the registers fit the 16-bit form and the
	 flag-setting behaviour agrees with the IT context.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
10864
10865 static void
10866 do_t_mull (void)
10867 {
10868 unsigned RdLo, RdHi, Rn, Rm;
10869
10870 RdLo = inst.operands[0].reg;
10871 RdHi = inst.operands[1].reg;
10872 Rn = inst.operands[2].reg;
10873 Rm = inst.operands[3].reg;
10874
10875 reject_bad_reg (RdLo);
10876 reject_bad_reg (RdHi);
10877 reject_bad_reg (Rn);
10878 reject_bad_reg (Rm);
10879
10880 inst.instruction |= RdLo << 12;
10881 inst.instruction |= RdHi << 8;
10882 inst.instruction |= Rn << 16;
10883 inst.instruction |= Rm;
10884
10885 if (RdLo == RdHi)
10886 as_tsktsk (_("rdhi and rdlo must be different"));
10887 }
10888
/* Encode NOP and the other hint instructions (operand 0 is the hint
   number).  The 16-bit hint encoding only exists from Thumb-2
   onwards (PR 9722), so on earlier cores the traditional 0x46c0 NOP
   is substituted; divided syntax rejects hint operands entirely.  */
static void
do_t_nop (void)
{
  /* Hints neither require nor affect IT state.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* .w suffix or a hint number too big for the 16-bit form.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
10921
10922 static void
10923 do_t_neg (void)
10924 {
10925 if (unified_syntax)
10926 {
10927 bfd_boolean narrow;
10928
10929 if (THUMB_SETS_FLAGS (inst.instruction))
10930 narrow = !in_it_block ();
10931 else
10932 narrow = in_it_block ();
10933 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10934 narrow = FALSE;
10935 if (inst.size_req == 4)
10936 narrow = FALSE;
10937
10938 if (!narrow)
10939 {
10940 inst.instruction = THUMB_OP32 (inst.instruction);
10941 inst.instruction |= inst.operands[0].reg << 8;
10942 inst.instruction |= inst.operands[1].reg << 16;
10943 }
10944 else
10945 {
10946 inst.instruction = THUMB_OP16 (inst.instruction);
10947 inst.instruction |= inst.operands[0].reg;
10948 inst.instruction |= inst.operands[1].reg << 3;
10949 }
10950 }
10951 else
10952 {
10953 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
10954 BAD_HIREG);
10955 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10956
10957 inst.instruction = THUMB_OP16 (inst.instruction);
10958 inst.instruction |= inst.operands[0].reg;
10959 inst.instruction |= inst.operands[1].reg << 3;
10960 }
10961 }
10962
10963 static void
10964 do_t_orn (void)
10965 {
10966 unsigned Rd, Rn;
10967
10968 Rd = inst.operands[0].reg;
10969 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
10970
10971 reject_bad_reg (Rd);
10972 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10973 reject_bad_reg (Rn);
10974
10975 inst.instruction |= Rd << 8;
10976 inst.instruction |= Rn << 16;
10977
10978 if (!inst.operands[2].isreg)
10979 {
10980 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10981 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10982 }
10983 else
10984 {
10985 unsigned Rm;
10986
10987 Rm = inst.operands[2].reg;
10988 reject_bad_reg (Rm);
10989
10990 constraint (inst.operands[2].shifted
10991 && inst.operands[2].immisreg,
10992 _("shift must be constant"));
10993 encode_thumb32_shifted_operand (2);
10994 }
10995 }
10996
10997 static void
10998 do_t_pkhbt (void)
10999 {
11000 unsigned Rd, Rn, Rm;
11001
11002 Rd = inst.operands[0].reg;
11003 Rn = inst.operands[1].reg;
11004 Rm = inst.operands[2].reg;
11005
11006 reject_bad_reg (Rd);
11007 reject_bad_reg (Rn);
11008 reject_bad_reg (Rm);
11009
11010 inst.instruction |= Rd << 8;
11011 inst.instruction |= Rn << 16;
11012 inst.instruction |= Rm;
11013 if (inst.operands[3].present)
11014 {
11015 unsigned int val = inst.reloc.exp.X_add_number;
11016 constraint (inst.reloc.exp.X_op != O_constant,
11017 _("expression too complex"));
11018 inst.instruction |= (val & 0x1c) << 10;
11019 inst.instruction |= (val & 0x03) << 6;
11020 }
11021 }
11022
11023 static void
11024 do_t_pkhtb (void)
11025 {
11026 if (!inst.operands[3].present)
11027 {
11028 unsigned Rtmp;
11029
11030 inst.instruction &= ~0x00000020;
11031
11032 /* PR 10168. Swap the Rm and Rn registers. */
11033 Rtmp = inst.operands[1].reg;
11034 inst.operands[1].reg = inst.operands[2].reg;
11035 inst.operands[2].reg = Rtmp;
11036 }
11037 do_t_pkhbt ();
11038 }
11039
/* Encode a Thumb-2 preload (PLD-style) instruction.  Operand 0 is
   the address; a register offset may not be r13 or r15.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
11048
/* Encode PUSH/POP.  Prefers the 16-bit encoding: low registers only,
   or low registers plus LR (for PUSH) / PC (for POP).  Any other
   register list needs the 32-bit load/store-multiple form, which is
   only available in unified syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* 32-bit form: load/store multiple, base r13 with writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
11082
11083 static void
11084 do_t_rbit (void)
11085 {
11086 unsigned Rd, Rm;
11087
11088 Rd = inst.operands[0].reg;
11089 Rm = inst.operands[1].reg;
11090
11091 reject_bad_reg (Rd);
11092 reject_bad_reg (Rm);
11093
11094 inst.instruction |= Rd << 8;
11095 inst.instruction |= Rm << 16;
11096 inst.instruction |= Rm;
11097 }
11098
11099 static void
11100 do_t_rev (void)
11101 {
11102 unsigned Rd, Rm;
11103
11104 Rd = inst.operands[0].reg;
11105 Rm = inst.operands[1].reg;
11106
11107 reject_bad_reg (Rd);
11108 reject_bad_reg (Rm);
11109
11110 if (Rd <= 7 && Rm <= 7
11111 && inst.size_req != 4)
11112 {
11113 inst.instruction = THUMB_OP16 (inst.instruction);
11114 inst.instruction |= Rd;
11115 inst.instruction |= Rm << 3;
11116 }
11117 else if (unified_syntax)
11118 {
11119 inst.instruction = THUMB_OP32 (inst.instruction);
11120 inst.instruction |= Rd << 8;
11121 inst.instruction |= Rm << 16;
11122 inst.instruction |= Rm;
11123 }
11124 else
11125 inst.error = BAD_HIREG;
11126 }
11127
11128 static void
11129 do_t_rrx (void)
11130 {
11131 unsigned Rd, Rm;
11132
11133 Rd = inst.operands[0].reg;
11134 Rm = inst.operands[1].reg;
11135
11136 reject_bad_reg (Rd);
11137 reject_bad_reg (Rm);
11138
11139 inst.instruction |= Rd << 8;
11140 inst.instruction |= Rm;
11141 }
11142
/* Encode RSB/RSBS Rd, {Rs,} #imm or shifted register.  With two
   operands Rs defaults to Rd.  A suitable "rsb #0" is rewritten as
   the 16-bit NEG; all other forms use the 32-bit encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; NEGS is the flag-setting
	 16-bit instruction, so narrow only when that agrees with the
	 IT context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only a literal zero immediate qualifies.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
11197
11198 static void
11199 do_t_setend (void)
11200 {
11201 set_it_insn_type (OUTSIDE_IT_INSN);
11202 if (inst.operands[0].imm)
11203 inst.instruction |= 0x8;
11204 }
11205
/* Encode the Thumb shift instructions ASR/LSL/LSR/ROR and their
   flag-setting variants, in both immediate-shift and register-shift
   forms.  With two operands the source defaults to the destination.
   Wide immediate shifts are emitted as MOV/MOVS with a shifted
   operand; an immediate ROR has no 16-bit encoding.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow only when the flag-setting behaviour matches the IT
	 context and the operands fit the 16-bit encodings.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate ROR.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register shifts are destructive (Rd == Rn) and take
	 only a low shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Wide immediate shift: emit MOV/MOVS with a
		 shifted-register operand instead.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit encodings only, no flag-setting
	 mnemonics.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
11341
11342 static void
11343 do_t_simd (void)
11344 {
11345 unsigned Rd, Rn, Rm;
11346
11347 Rd = inst.operands[0].reg;
11348 Rn = inst.operands[1].reg;
11349 Rm = inst.operands[2].reg;
11350
11351 reject_bad_reg (Rd);
11352 reject_bad_reg (Rn);
11353 reject_bad_reg (Rm);
11354
11355 inst.instruction |= Rd << 8;
11356 inst.instruction |= Rn << 16;
11357 inst.instruction |= Rm;
11358 }
11359
11360 static void
11361 do_t_simd2 (void)
11362 {
11363 unsigned Rd, Rn, Rm;
11364
11365 Rd = inst.operands[0].reg;
11366 Rm = inst.operands[1].reg;
11367 Rn = inst.operands[2].reg;
11368
11369 reject_bad_reg (Rd);
11370 reject_bad_reg (Rn);
11371 reject_bad_reg (Rm);
11372
11373 inst.instruction |= Rd << 8;
11374 inst.instruction |= Rn << 16;
11375 inst.instruction |= Rm;
11376 }
11377
11378 static void
11379 do_t_smc (void)
11380 {
11381 unsigned int value = inst.reloc.exp.X_add_number;
11382 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11383 _("SMC is not permitted on this architecture"));
11384 constraint (inst.reloc.exp.X_op != O_constant,
11385 _("expression too complex"));
11386 inst.reloc.type = BFD_RELOC_UNUSED;
11387 inst.instruction |= (value & 0xf000) >> 12;
11388 inst.instruction |= (value & 0x0ff0);
11389 inst.instruction |= (value & 0x000f) << 16;
11390 }
11391
/* Common encoder for Thumb SSAT and USAT: <op> Rd, #sat, Rn {, shift}.
   BIAS is subtracted from the saturation position before encoding:
   1 for SSAT (whose immediate is encoded as sat_pos - 1), 0 for USAT.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional LSL/ASR shift on Rn.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit. */

	  /* The 5-bit shift amount is split across the encoding:
	     imm3 (bits 4:2 of the amount) and imm2 (bits 1:0).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
11429
/* Encode Thumb SSAT.  The signed saturate position is encoded as
   sat_pos - 1, hence the bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
11435
11436 static void
11437 do_t_ssat16 (void)
11438 {
11439 unsigned Rd, Rn;
11440
11441 Rd = inst.operands[0].reg;
11442 Rn = inst.operands[2].reg;
11443
11444 reject_bad_reg (Rd);
11445 reject_bad_reg (Rn);
11446
11447 inst.instruction |= Rd << 8;
11448 inst.instruction |= inst.operands[1].imm - 1;
11449 inst.instruction |= Rn << 16;
11450 }
11451
/* Encode Thumb STREX: strex Rd, Rt, [Rn {, #imm}].  Only a plain base
   register with an optional positive immediate offset is permitted;
   every other addressing mode is rejected.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is fixed up later as an unsigned 8-bit immediate.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11468
/* Encode Thumb STREXD: strexd Rd, Rt, Rt2, [Rn].  If Rt2 is omitted it
   defaults to Rt + 1.  The status register Rd must not overlap any of
   the source/base registers.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
11485
11486 static void
11487 do_t_sxtah (void)
11488 {
11489 unsigned Rd, Rn, Rm;
11490
11491 Rd = inst.operands[0].reg;
11492 Rn = inst.operands[1].reg;
11493 Rm = inst.operands[2].reg;
11494
11495 reject_bad_reg (Rd);
11496 reject_bad_reg (Rn);
11497 reject_bad_reg (Rm);
11498
11499 inst.instruction |= Rd << 8;
11500 inst.instruction |= Rn << 16;
11501 inst.instruction |= Rm;
11502 inst.instruction |= inst.operands[3].imm << 4;
11503 }
11504
/* Encode Thumb SXTH and the other extend instructions sharing this
   encoder: <op> Rd, Rm {, ROR #imm}.  Prefers the 16-bit encoding when
   possible; falls back to the 32-bit encoding in unified syntax.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* 16-bit form requires: a narrow opcode is available (inst.instruction
     still holds a 16-bit value), no explicit .w request, low registers,
     and no rotation.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation (0, 8, 16 or 24) encoded in bits 5:4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-unified (divided) syntax only has the 16-bit form, so reaching
	 here is always an error; report the most specific one.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
11540
/* Encode Thumb SVC/SWI.  On v6-M the OS extension must be present;
   its use is recorded so the build attributes reflect it.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os))
	as_bad (_("SVC is not permitted on this architecture"));
      /* Record that the OS extension was actually used.  */
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  /* The comment number is filled in by the SWI relocation fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
11555
/* Encode Thumb TBB/TBH: tb{b,h} [Rn, Rm{, LSL #1}].  Bit 4 of the
   opcode distinguishes the halfword form, which is the only one that
   accepts the shifted index.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* TBB/TBH are branches: inside an IT block they must come last.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
11577
/* Encode Thumb USAT.  Unlike SSAT the saturation position is encoded
   unmodified, hence the bias of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
11583
11584 static void
11585 do_t_usat16 (void)
11586 {
11587 unsigned Rd, Rn;
11588
11589 Rd = inst.operands[0].reg;
11590 Rn = inst.operands[2].reg;
11591
11592 reject_bad_reg (Rd);
11593 reject_bad_reg (Rn);
11594
11595 inst.instruction |= Rd << 8;
11596 inst.instruction |= inst.operands[1].imm;
11597 inst.instruction |= Rn << 16;
11598 }
11599
11600 /* Neon instruction encoder helpers. */
11601
11602 /* Encodings for the different types for various Neon opcodes. */
11603
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode table: the alternative base encodings an
   overloaded mnemonic can resolve to, selected via the NEON_ENC_*
   accessor macros below.  N_INV marks a variant that does not exist.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or plain-register) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
11613
/* Map overloaded Neon opcodes to their respective encodings.  Each X
   entry is (mnemonic, integer-variant, float/poly-variant,
   scalar/immediate-variant); N_INV marks a nonexistent variant.  The
   same table is expanded twice below: once to build the N_MNEM_ enum,
   once to build the encoding array indexed by it.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed. */		\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,	0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,	0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate. */ \
  X(vcvt,	0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point. */ \
  X(vdup,	0xe800b10, N_INV,     0x1b00c00), /* arm, scalar. */ \
  X(vld1,	0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,	0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,	0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,	0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,	0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,	0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)

/* Enumerate the overloaded mnemonics: N_MNEM_vabd, N_MNEM_vmax, ...  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by the enum above.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
11690
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one column of neon_enc_tab for the mnemonic packed into
   the low 28 bits of X; the SINGLE/DOUBLE variants also preserve the
   condition/prefix held in the top 4 bits.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))

/* Replace the pending mnemonic code in inst.instruction with the chosen
   base encoding, and mark the instruction as Neon so that suffix
   checking (below) accepts it.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a Neon type suffix attached to a non-Neon instruction.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
11724
11725 /* Define shapes for instruction operands. The following mnemonic characters
11726 are used in this table:
11727
11728 F - VFP S<n> register
11729 D - Neon D<n> register
11730 Q - Neon Q<n> register
11731 I - Immediate
11732 S - Scalar
11733 R - ARM register
11734 L - D<n> register list
11735
11736 This table is used to generate various data:
11737 - enumerations of the form NS_DDR to be used as arguments to
11738 neon_select_shape.
11739 - a table classifying shapes into single, double, quad, mixed.
11740 - a table used to drive neon_select_shape. */
11741
/* Each X entry is (operand count, operand element kinds, overall class);
   expanded below into the NS_* enum, the class table, and the shape
   matching table.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)
11783
/* Expand NEON_SHAPE_DEF into enumerators named after the operand kinds,
   e.g. NS_DDD, NS_QQI.  NS_NULL terminates shape lists passed to
   neon_select_shape.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
11800
/* Overall register-width class of a shape; used e.g. by neon_quad to
   decide whether the Q bit should be set.  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
11817
/* The kinds of operand element a shape can require (see the mnemonic
   character table above NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F: VFP single.  */
  64,	/* SE_D: Neon D register.  */
  128,	/* SE_Q: Neon Q register.  */
  0,	/* SE_I: immediate — no register width.  */
  32,	/* SE_S: scalar.  */
  32,	/* SE_R: ARM core register.  */
  0	/* SE_L: register list — no single width.  */
};
11840
/* Element-by-element description of a shape, used by neon_select_shape
   to match parsed operands.  */
struct neon_shape_info
{
  unsigned els;				 /* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS]; /* Kind of each operand.  */
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
11862
11863 /* Bit masks used in type checking given instructions.
11864 'N_EQK' means the type must be the same as (or based on in some way) the key
11865 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11866 set, various other bits can be set as well in order to modify the meaning of
11867 the type constraint. */
11868
enum neon_type_mask
{
  /* Concrete element types, one bit each.  */
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  /* Modifier bits, only meaningful together with N_EQK; they reuse the
     low bit positions of the concrete types above.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,	      /* No representable type.  */
  N_MAX_NONSPECIAL = N_F64 /* Highest single-bit concrete type.  */
};
11905
/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the concrete type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
11918
11919 /* Select a "shape" for the current instruction (describing register types or
11920 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11921 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11922 function of operand parsing, so this function doesn't need to be called.
11923 Shapes should be listed in order of decreasing length. */
11924
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated list of candidates; stop at the first
     shape whose every element matches the corresponding parsed operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      /* VFP single-precision register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar.  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: accepted without further checking here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Only diagnose if the caller actually supplied candidates.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
12014
12015 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12016 means the Q bit should be set). */
12017
static int
neon_quad (enum neon_shape shape)
{
  /* Only shapes classified SC_QUAD count; SINGLE/DOUBLE/MIXED do not.  */
  return neon_shape_class[shape] == SC_QUAD;
}
12023
/* Apply the N_EQK modifier bits in TYPEBITS to the type/size pair
   (*G_TYPE, *G_SIZE) in place.  No effect unless N_EQK is set.  */
static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      /* Size modifiers: halve takes precedence over double.  */
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      /* Type modifiers, in priority order; at most one applies.  */
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
12048
12049 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12050 operand type, i.e. the single type specified in a Neon instruction when it
12051 is the only one given. */
12052
12053 static struct neon_type_el
12054 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12055 {
12056 struct neon_type_el dest = *key;
12057
12058 gas_assert ((thisarg & N_EQK) != 0);
12059
12060 neon_modify_type_size (thisarg, &dest.type, &dest.size);
12061
12062 return dest;
12063 }
12064
12065 /* Convert Neon type and size into compact bitmask representation. */
12066
12067 static enum neon_type_mask
12068 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12069 {
12070 switch (type)
12071 {
12072 case NT_untyped:
12073 switch (size)
12074 {
12075 case 8: return N_8;
12076 case 16: return N_16;
12077 case 32: return N_32;
12078 case 64: return N_64;
12079 default: ;
12080 }
12081 break;
12082
12083 case NT_integer:
12084 switch (size)
12085 {
12086 case 8: return N_I8;
12087 case 16: return N_I16;
12088 case 32: return N_I32;
12089 case 64: return N_I64;
12090 default: ;
12091 }
12092 break;
12093
12094 case NT_float:
12095 switch (size)
12096 {
12097 case 16: return N_F16;
12098 case 32: return N_F32;
12099 case 64: return N_F64;
12100 default: ;
12101 }
12102 break;
12103
12104 case NT_poly:
12105 switch (size)
12106 {
12107 case 8: return N_P8;
12108 case 16: return N_P16;
12109 default: ;
12110 }
12111 break;
12112
12113 case NT_signed:
12114 switch (size)
12115 {
12116 case 8: return N_S8;
12117 case 16: return N_S16;
12118 case 32: return N_S32;
12119 case 64: return N_S64;
12120 default: ;
12121 }
12122 break;
12123
12124 case NT_unsigned:
12125 switch (size)
12126 {
12127 case 8: return N_U8;
12128 case 16: return N_U16;
12129 case 32: return N_U32;
12130 case 64: return N_U64;
12131 default: ;
12132 }
12133 break;
12134
12135 default: ;
12136 }
12137
12138 return N_UTYP;
12139 }
12140
12141 /* Convert compact Neon bitmask type representation to a type and size. Only
12142 handles the case where a single bit is set in the mask. */
12143
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* Key-relative masks reuse the low bit positions and cannot be
     decoded as concrete types.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size group the bit is in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the type class from the type group the bit is in.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
12179
12180 /* Modify a bitmask of allowed types. This is only needed for type
12181 relaxation. */
12182
12183 static unsigned
12184 modify_types_allowed (unsigned allowed, unsigned mods)
12185 {
12186 unsigned size;
12187 enum neon_el_type type;
12188 unsigned destmask;
12189 int i;
12190
12191 destmask = 0;
12192
12193 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12194 {
12195 if (el_type_of_type_chk (&type, &size,
12196 (enum neon_type_mask) (allowed & i)) == SUCCESS)
12197 {
12198 neon_modify_type_size (mods, &type, &size);
12199 destmask |= type_chk_of_el_type (type, size);
12200 }
12201 }
12202
12203 return destmask;
12204 }
12205
12206 /* Check type and return type classification.
12207 The manual states (paraphrase): If one datatype is given, it indicates the
12208 type given in:
12209 - the second operand, if there is one
12210 - the operand, if there is no second operand
12211 - the result, if there are no operands.
12212 This isn't quite good enough though, so we use a concept of a "key" datatype
12213 which is set on a per-instruction basis, which is the one which matters when
12214 only one data type is written.
12215 Note: this function has side-effects (e.g. filling in missing operands). All
12216 Neon instructions should call it before performing bit encoding. */
12217
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE short-circuits all checking.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types on the mnemonic and on individual operands are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's type/size, pass 1
     checks every operand against the key and the allowed masks.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: apply the modifiers to the key
		     type and require an exact match.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
12393
12394 /* Neon-style VFP instruction forwarding. */
12395
12396 /* Thumb VFP instructions have 0xE in the condition field. */
12397
12398 static void
12399 do_vfp_cond_or_thumb (void)
12400 {
12401 inst.is_neon = 1;
12402
12403 if (thumb_mode)
12404 inst.instruction |= 0xe0000000;
12405 else
12406 inst.instruction |= inst.cond << 28;
12407 }
12408
12409 /* Look up and encode a simple mnemonic, for use as a helper function for the
12410 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12411 etc. It is assumed that operand parsing has already been done, and that the
12412 operands are in the form expected by the given opcode (this isn't necessarily
12413 the same as the form in which they were parsed, hence some massaging must
12414 take place before this function is called).
12415 Checks current arch version against that in the looked-up opcode. */
12416
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  /* OPNAME must name an existing mnemonic; a miss is a programming
     error, not user input.  */
  if (!opcode)
    abort ();

  /* Check the selected opcode against the active CPU/FPU features.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
12444
12445 static void
12446 do_vfp_nsyn_add_sub (enum neon_shape rs)
12447 {
12448 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12449
12450 if (rs == NS_FFF)
12451 {
12452 if (is_add)
12453 do_vfp_nsyn_opcode ("fadds");
12454 else
12455 do_vfp_nsyn_opcode ("fsubs");
12456 }
12457 else
12458 {
12459 if (is_add)
12460 do_vfp_nsyn_opcode ("faddd");
12461 else
12462 do_vfp_nsyn_opcode ("fsubd");
12463 }
12464 }
12465
12466 /* Check operand types to see if this is a VFP instruction, and if so call
12467 PFN (). */
12468
12469 static int
12470 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12471 {
12472 enum neon_shape rs;
12473 struct neon_type_el et;
12474
12475 switch (args)
12476 {
12477 case 2:
12478 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12479 et = neon_check_type (2, rs,
12480 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12481 break;
12482
12483 case 3:
12484 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12485 et = neon_check_type (3, rs,
12486 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12487 break;
12488
12489 default:
12490 abort ();
12491 }
12492
12493 if (et.type != NT_invtype)
12494 {
12495 pfn (rs);
12496 return SUCCESS;
12497 }
12498
12499 inst.error = NULL;
12500 return FAIL;
12501 }
12502
12503 static void
12504 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12505 {
12506 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12507
12508 if (rs == NS_FFF)
12509 {
12510 if (is_mla)
12511 do_vfp_nsyn_opcode ("fmacs");
12512 else
12513 do_vfp_nsyn_opcode ("fnmacs");
12514 }
12515 else
12516 {
12517 if (is_mla)
12518 do_vfp_nsyn_opcode ("fmacd");
12519 else
12520 do_vfp_nsyn_opcode ("fnmacd");
12521 }
12522 }
12523
12524 static void
12525 do_vfp_nsyn_fma_fms (enum neon_shape rs)
12526 {
12527 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12528
12529 if (rs == NS_FFF)
12530 {
12531 if (is_fma)
12532 do_vfp_nsyn_opcode ("ffmas");
12533 else
12534 do_vfp_nsyn_opcode ("ffnmas");
12535 }
12536 else
12537 {
12538 if (is_fma)
12539 do_vfp_nsyn_opcode ("ffmad");
12540 else
12541 do_vfp_nsyn_opcode ("ffnmad");
12542 }
12543 }
12544
12545 static void
12546 do_vfp_nsyn_mul (enum neon_shape rs)
12547 {
12548 if (rs == NS_FFF)
12549 do_vfp_nsyn_opcode ("fmuls");
12550 else
12551 do_vfp_nsyn_opcode ("fmuld");
12552 }
12553
12554 static void
12555 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12556 {
12557 int is_neg = (inst.instruction & 0x80) != 0;
12558 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12559
12560 if (rs == NS_FF)
12561 {
12562 if (is_neg)
12563 do_vfp_nsyn_opcode ("fnegs");
12564 else
12565 do_vfp_nsyn_opcode ("fabss");
12566 }
12567 else
12568 {
12569 if (is_neg)
12570 do_vfp_nsyn_opcode ("fnegd");
12571 else
12572 do_vfp_nsyn_opcode ("fabsd");
12573 }
12574 }
12575
12576 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12577 insns belong to Neon, and are handled elsewhere. */
12578
12579 static void
12580 do_vfp_nsyn_ldm_stm (int is_dbmode)
12581 {
12582 int is_ldm = (inst.instruction & (1 << 20)) != 0;
12583 if (is_ldm)
12584 {
12585 if (is_dbmode)
12586 do_vfp_nsyn_opcode ("fldmdbs");
12587 else
12588 do_vfp_nsyn_opcode ("fldmias");
12589 }
12590 else
12591 {
12592 if (is_dbmode)
12593 do_vfp_nsyn_opcode ("fstmdbs");
12594 else
12595 do_vfp_nsyn_opcode ("fstmias");
12596 }
12597 }
12598
12599 static void
12600 do_vfp_nsyn_sqrt (void)
12601 {
12602 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12603 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12604
12605 if (rs == NS_FF)
12606 do_vfp_nsyn_opcode ("fsqrts");
12607 else
12608 do_vfp_nsyn_opcode ("fsqrtd");
12609 }
12610
12611 static void
12612 do_vfp_nsyn_div (void)
12613 {
12614 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12615 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12616 N_F32 | N_F64 | N_KEY | N_VFP);
12617
12618 if (rs == NS_FFF)
12619 do_vfp_nsyn_opcode ("fdivs");
12620 else
12621 do_vfp_nsyn_opcode ("fdivd");
12622 }
12623
12624 static void
12625 do_vfp_nsyn_nmul (void)
12626 {
12627 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12628 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12629 N_F32 | N_F64 | N_KEY | N_VFP);
12630
12631 if (rs == NS_FFF)
12632 {
12633 NEON_ENCODE (SINGLE, inst);
12634 do_vfp_sp_dyadic ();
12635 }
12636 else
12637 {
12638 NEON_ENCODE (DOUBLE, inst);
12639 do_vfp_dp_rd_rn_rm ();
12640 }
12641 do_vfp_cond_or_thumb ();
12642 }
12643
/* Handle VCMP/VCMPE in the Neon-style VFP syntax.  With a register
   second operand this is a plain register-register compare; with an
   immediate operand the mnemonic is rewritten into its compare-with-zero
   form before encoding.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Shift the pseudo-mnemonic number from vcmp/vcmpe to the
	 corresponding compare-with-zero variant by adding the constant
	 difference between the two mnemonic values.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
12693
12694 static void
12695 nsyn_insert_sp (void)
12696 {
12697 inst.operands[1] = inst.operands[0];
12698 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12699 inst.operands[0].reg = REG_SP;
12700 inst.operands[0].isreg = 1;
12701 inst.operands[0].writeback = 1;
12702 inst.operands[0].present = 1;
12703 }
12704
12705 static void
12706 do_vfp_nsyn_push (void)
12707 {
12708 nsyn_insert_sp ();
12709 if (inst.operands[1].issingle)
12710 do_vfp_nsyn_opcode ("fstmdbs");
12711 else
12712 do_vfp_nsyn_opcode ("fstmdbd");
12713 }
12714
12715 static void
12716 do_vfp_nsyn_pop (void)
12717 {
12718 nsyn_insert_sp ();
12719 if (inst.operands[1].issingle)
12720 do_vfp_nsyn_opcode ("fldmias");
12721 else
12722 do_vfp_nsyn_opcode ("fldmiad");
12723 }
12724
12725 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12726 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12727
12728 static void
12729 neon_dp_fixup (struct arm_it* insn)
12730 {
12731 unsigned int i = insn->instruction;
12732 insn->is_neon = 1;
12733
12734 if (thumb_mode)
12735 {
12736 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12737 if (i & (1 << 24))
12738 i |= 1 << 28;
12739
12740 i &= ~(1 << 24);
12741
12742 i |= 0xef000000;
12743 }
12744 else
12745 i |= 0xf2000000;
12746
12747 insn->instruction = i;
12748 }
12749
12750 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
12751 (0, 1, 2, 3). */
12752
static unsigned
neon_logbits (unsigned x)
{
  int lowest_set = ffs (x);	/* 1-based index of the lowest set bit.  */

  /* 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3.  */
  return lowest_set - 4;
}
12758
/* Split a (D/Q) register number into its low four bits and its high bit,
   which are encoded in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
12761
12762 /* Encode insns with bit pattern:
12763
12764 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12765 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12766
12767 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12768 different meaning for some instruction. */
12769
12770 static void
12771 neon_three_same (int isquad, int ubit, int size)
12772 {
12773 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12774 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12775 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12776 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12777 inst.instruction |= LOW4 (inst.operands[2].reg);
12778 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12779 inst.instruction |= (isquad != 0) << 6;
12780 inst.instruction |= (ubit != 0) << 24;
12781 if (size != -1)
12782 inst.instruction |= neon_logbits (size) << 20;
12783
12784 neon_dp_fixup (&inst);
12785 }
12786
12787 /* Encode instructions of the form:
12788
12789 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12790 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12791
12792 Don't write size if SIZE == -1. */
12793
12794 static void
12795 neon_two_same (int qbit, int ubit, int size)
12796 {
12797 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12798 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12799 inst.instruction |= LOW4 (inst.operands[1].reg);
12800 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12801 inst.instruction |= (qbit != 0) << 6;
12802 inst.instruction |= (ubit != 0) << 24;
12803
12804 if (size != -1)
12805 inst.instruction |= neon_logbits (size) << 18;
12806
12807 neon_dp_fixup (&inst);
12808 }
12809
12810 /* Neon instruction encoders, in approximate order of appearance. */
12811
12812 static void
12813 do_neon_dyadic_i_su (void)
12814 {
12815 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12816 struct neon_type_el et = neon_check_type (3, rs,
12817 N_EQK, N_EQK, N_SU_32 | N_KEY);
12818 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12819 }
12820
12821 static void
12822 do_neon_dyadic_i64_su (void)
12823 {
12824 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12825 struct neon_type_el et = neon_check_type (3, rs,
12826 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12827 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12828 }
12829
/* Encode a Neon immediate-shift instruction.  IMMBITS goes in the imm6
   field; ET's element size is spread across the L bit (bit 7) and the
   high bits of imm6.  If WRITE_UBIT, UVAL supplies the U bit.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes (1/2/4/8).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* size==8 (64-bit elements) sets the L bit; smaller sizes set the
     corresponding high imm6 bits.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
12848
/* Encode VSHL/VQSHL: the immediate form shifts by a constant; the
   register form is a three-same encoding with the last two operands
   swapped relative to the usual Dd, Dn, Dm order.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
12880
12881 static void
12882 do_neon_qshl_imm (void)
12883 {
12884 if (!inst.operands[2].isreg)
12885 {
12886 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12887 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12888
12889 NEON_ENCODE (IMMED, inst);
12890 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12891 inst.operands[2].imm);
12892 }
12893 else
12894 {
12895 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12896 struct neon_type_el et = neon_check_type (3, rs,
12897 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12898 unsigned int tmp;
12899
12900 /* See note in do_neon_shl_imm. */
12901 tmp = inst.operands[2].reg;
12902 inst.operands[2].reg = inst.operands[1].reg;
12903 inst.operands[1].reg = tmp;
12904 NEON_ENCODE (INTEGER, inst);
12905 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12906 }
12907 }
12908
12909 static void
12910 do_neon_rshl (void)
12911 {
12912 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12913 struct neon_type_el et = neon_check_type (3, rs,
12914 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12915 unsigned int tmp;
12916
12917 tmp = inst.operands[2].reg;
12918 inst.operands[2].reg = inst.operands[1].reg;
12919 inst.operands[1].reg = tmp;
12920 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12921 }
12922
/* Compute the cmode field for a VBIC/VORR-class logic immediate of
   element width SIZE, storing the 8-bit encodable immediate in *IMMBITS.
   Returns the cmode value, or FAIL (after reporting an error) when the
   immediate cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* A 32-bit immediate must be a single byte in one of the four
	 byte positions; cmode selects which position.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if both halves are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit case: a single byte in either half-word position.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
12978
12979 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12980 A, B, C, D. */
12981
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each byte of IMM must be either all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
12990
12991 /* For immediate of above form, return 0bABCD. */
12992
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned abcd = 0;
  int byte;

  /* Gather bit 0 of each byte into a 4-bit result.  */
  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 1) << byte;

  return abcd;
}
12999
13000 /* Compress quarter-float representation to 0b...000 abcdefgh. */
13001
static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;	/* bcdefgh from exponent/mantissa.  */
  unsigned sign = (imm >> 24) & 0x80;	/* a: the sign bit.  */

  return low7 | sign;
}
13007
13008 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13009 the instruction. *OP is passed as the initial value of the op field, and
13010 may be set to a different value depending on the constant (i.e.
13011 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13012 MVN). If the immediate looks like a repeated pattern then also
13013 try smaller element sizes. */
13014
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediate (cmode 0xf): only for 32-bit MOV, not MVN.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit byte-mask form: each byte all-zeros or all-ones, squashed
	 into 8 bits.  Uses OP = 1 despite being a MOV (see header).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only a repeated 32-bit pattern can be tried below.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* Single byte in one of the four byte positions, or byte followed
	 by ones (cmode 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a repeated 16-bit pattern if the halves match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single byte in either half-word position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as a repeated 8-bit pattern if the bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
13117
13118 /* Write immediate bits [7:0] to the following locations:
13119
13120 |28/24|23 19|18 16|15 4|3 0|
13121 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13122
13123 This function is used by VMOV/VMVN/VORR/VBIC. */
13124
static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;		      /* efgh -> bits [3:0].  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;   /* bcd -> bits [18:16].  */
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;   /* a -> bit 24.  */
}
13132
13133 /* Invert low-order SIZE bits of XHI:XLO. */
13134
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* Fall through to invert the low word as well.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;
  if (xhi)
    *xhi = hi;
}
13169
/* Neon bitwise-logic instructions.  The three-register form is encoded
   directly; the immediate forms handle VBIC/VORR plus the VAND/VORN
   pseudo-instructions, which are rewritten via an inverted immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      /* In the three-operand immediate form the destination must also be
	 the first source.  */
      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
13255
13256 static void
13257 do_neon_bitfield (void)
13258 {
13259 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13260 neon_check_type (3, rs, N_IGNORE_TYPE);
13261 neon_three_same (neon_quad (rs), 0, -1);
13262 }
13263
13264 static void
13265 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13266 unsigned destbits)
13267 {
13268 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13269 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13270 types | N_KEY);
13271 if (et.type == NT_float)
13272 {
13273 NEON_ENCODE (FLOAT, inst);
13274 neon_three_same (neon_quad (rs), 0, -1);
13275 }
13276 else
13277 {
13278 NEON_ENCODE (INTEGER, inst);
13279 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13280 }
13281 }
13282
/* Dyadic operation over signed/unsigned/F32 element types; the U bit is
   set for unsigned types.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13288
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13296
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13304
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/adjust condition codes.  */
  NEON_CHECK_ARCH = 2	/* Require Neon support in the target arch.  */
};
13310
13311 /* Call this function if an instruction which may have belonged to the VFP or
13312 Neon instruction sets, but turned out to be a Neon instruction (due to the
13313 operand types involved, etc.). We have to check and/or fix-up a couple of
13314 things:
13315
13316 - Make sure the user hasn't attempted to make a Neon instruction
13317 conditional.
13318 - Alter the value in the condition code field if necessary.
13319 - Make sure that the arch supports Neon instructions.
13320
13321 Which of these operations take place depends on bits from enum
13322 vfp_or_neon_is_neon_bits.
13323
13324 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13325 current instruction's condition is COND_ALWAYS, the condition field is
13326 changed to inst.uncond_value. This is necessary because instructions shared
13327 between VFP and Neon may be conditional for the VFP variants only, and the
13328 unconditional Neon version must have, e.g., 0xF in the condition field. */
13329
13330 static int
13331 vfp_or_neon_is_neon (unsigned check)
13332 {
13333 /* Conditions are always legal in Thumb mode (IT blocks). */
13334 if (!thumb_mode && (check & NEON_CHECK_CC))
13335 {
13336 if (inst.cond != COND_ALWAYS)
13337 {
13338 first_error (_(BAD_COND));
13339 return FAIL;
13340 }
13341 if (inst.uncond_value != -1)
13342 inst.instruction |= inst.uncond_value << 28;
13343 }
13344
13345 if ((check & NEON_CHECK_ARCH)
13346 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13347 {
13348 first_error (_(BAD_FPU));
13349 return FAIL;
13350 }
13351
13352 return SUCCESS;
13353 }
13354
/* VADD/VSUB: try the VFP interpretation first, then fall back to Neon.  */
static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
13368
13369 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13370 result to be:
13371 V<op> A,B (A is operand 0, B is operand 2)
13372 to mean:
13373 V<op> A,B,A
13374 not:
13375 V<op> A,B,B
13376 so handle that case specially. */
13377
13378 static void
13379 neon_exchange_operands (void)
13380 {
13381 void *scratch = alloca (sizeof (inst.operands[0]));
13382 if (inst.operands[1].present)
13383 {
13384 /* Swap operands[1] and operands[2]. */
13385 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13386 inst.operands[1] = inst.operands[2];
13387 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13388 }
13389 else
13390 {
13391 inst.operands[1] = inst.operands[2];
13392 inst.operands[2] = inst.operands[0];
13393 }
13394 }
13395
13396 static void
13397 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
13398 {
13399 if (inst.operands[2].isreg)
13400 {
13401 if (invert)
13402 neon_exchange_operands ();
13403 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
13404 }
13405 else
13406 {
13407 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13408 struct neon_type_el et = neon_check_type (2, rs,
13409 N_EQK | N_SIZ, immtypes | N_KEY);
13410
13411 NEON_ENCODE (IMMED, inst);
13412 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13413 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13414 inst.instruction |= LOW4 (inst.operands[1].reg);
13415 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13416 inst.instruction |= neon_quad (rs) << 6;
13417 inst.instruction |= (et.type == NT_float) << 10;
13418 inst.instruction |= neon_logbits (et.size) << 18;
13419
13420 neon_dp_fixup (&inst);
13421 }
13422 }
13423
/* Neon compare, operands in encoding order.  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
13429
/* Neon compare with operands swapped before encoding.  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
13435
/* Neon equality compare: integer or F32 element types.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
13441
13442 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13443 scalars, which are encoded in 5 bits, M : Rm.
13444 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13445 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13446 index in M. */
13447
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned elt = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && reg <= 7 && elt <= 3)
    return reg | (elt << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && reg <= 15 && elt <= 1)
    return reg | (elt << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
13473
13474 /* Encode multiply / multiply-accumulate scalar instructions. */
13475
13476 static void
13477 neon_mul_mac (struct neon_type_el et, int ubit)
13478 {
13479 unsigned scalar;
13480
13481 /* Give a more helpful error message if we have an invalid type. */
13482 if (et.type == NT_invtype)
13483 return;
13484
13485 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13486 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13487 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13488 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13489 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13490 inst.instruction |= LOW4 (scalar);
13491 inst.instruction |= HI1 (scalar) << 5;
13492 inst.instruction |= (et.type == NT_float) << 8;
13493 inst.instruction |= neon_logbits (et.size) << 20;
13494 inst.instruction |= (ubit != 0) << 24;
13495
13496 neon_dp_fixup (&inst);
13497 }
13498
13499 static void
13500 do_neon_mac_maybe_scalar (void)
13501 {
13502 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13503 return;
13504
13505 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13506 return;
13507
13508 if (inst.operands[2].isscalar)
13509 {
13510 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13511 struct neon_type_el et = neon_check_type (3, rs,
13512 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13513 NEON_ENCODE (SCALAR, inst);
13514 neon_mul_mac (et, neon_quad (rs));
13515 }
13516 else
13517 {
13518 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13519 affected if we specify unsigned args. */
13520 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13521 }
13522 }
13523
/* VFMA/VFMS: try the VFP interpretation first, then fall back to Neon.  */
static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13535
13536 static void
13537 do_neon_tst (void)
13538 {
13539 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13540 struct neon_type_el et = neon_check_type (3, rs,
13541 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13542 neon_three_same (neon_quad (rs), 0, et.size);
13543 }
13544
13545 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13546 same types as the MAC equivalents. The polynomial type for this instruction
13547 is encoded the same as the integer type. */
13548
13549 static void
13550 do_neon_mul (void)
13551 {
13552 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13553 return;
13554
13555 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13556 return;
13557
13558 if (inst.operands[2].isscalar)
13559 do_neon_mac_maybe_scalar ();
13560 else
13561 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13562 }
13563
13564 static void
13565 do_neon_qdmulh (void)
13566 {
13567 if (inst.operands[2].isscalar)
13568 {
13569 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13570 struct neon_type_el et = neon_check_type (3, rs,
13571 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13572 NEON_ENCODE (SCALAR, inst);
13573 neon_mul_mac (et, neon_quad (rs));
13574 }
13575 else
13576 {
13577 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13578 struct neon_type_el et = neon_check_type (3, rs,
13579 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13580 NEON_ENCODE (INTEGER, inst);
13581 /* The U bit (rounding) comes from bit mask. */
13582 neon_three_same (neon_quad (rs), 0, et.size);
13583 }
13584 }
13585
13586 static void
13587 do_neon_fcmp_absolute (void)
13588 {
13589 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13590 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13591 /* Size field comes from bit mask. */
13592 neon_three_same (neon_quad (rs), 1, -1);
13593 }
13594
static void
do_neon_fcmp_absolute_inv (void)
{
  /* The "inverted" comparisons are pseudo-instructions: swap the two
     source operands and encode the corresponding VACGE/VACGT.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13601
13602 static void
13603 do_neon_step (void)
13604 {
13605 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13606 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13607 neon_three_same (neon_quad (rs), 0, -1);
13608 }
13609
/* VABS/VNEG: encode the VFP form when the operands are VFP registers,
   otherwise the Neon two-register form.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the floating-point variant of the operation.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
13635
13636 static void
13637 do_neon_sli (void)
13638 {
13639 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13640 struct neon_type_el et = neon_check_type (2, rs,
13641 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13642 int imm = inst.operands[2].imm;
13643 constraint (imm < 0 || (unsigned)imm >= et.size,
13644 _("immediate out of range for insert"));
13645 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
13646 }
13647
13648 static void
13649 do_neon_sri (void)
13650 {
13651 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13652 struct neon_type_el et = neon_check_type (2, rs,
13653 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13654 int imm = inst.operands[2].imm;
13655 constraint (imm < 1 || (unsigned)imm > et.size,
13656 _("immediate out of range for insert"));
13657 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
13658 }
13659
13660 static void
13661 do_neon_qshlu_imm (void)
13662 {
13663 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13664 struct neon_type_el et = neon_check_type (2, rs,
13665 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
13666 int imm = inst.operands[2].imm;
13667 constraint (imm < 0 || (unsigned)imm >= et.size,
13668 _("immediate out of range for shift"));
13669 /* Only encodes the 'U present' variant of the instruction.
13670 In this case, signed types have OP (bit 8) set to 0.
13671 Unsigned types have OP set to 1. */
13672 inst.instruction |= (et.type == NT_unsigned) << 8;
13673 /* The rest of the bits are the same as other immediate shifts. */
13674 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
13675 }
13676
13677 static void
13678 do_neon_qmovn (void)
13679 {
13680 struct neon_type_el et = neon_check_type (2, NS_DQ,
13681 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
13682 /* Saturating move where operands can be signed or unsigned, and the
13683 destination has the same signedness. */
13684 NEON_ENCODE (INTEGER, inst);
13685 if (et.type == NT_unsigned)
13686 inst.instruction |= 0xc0;
13687 else
13688 inst.instruction |= 0x80;
13689 neon_two_same (0, 1, et.size / 2);
13690 }
13691
13692 static void
13693 do_neon_qmovun (void)
13694 {
13695 struct neon_type_el et = neon_check_type (2, NS_DQ,
13696 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
13697 /* Saturating move with unsigned results. Operands must be signed. */
13698 NEON_ENCODE (INTEGER, inst);
13699 neon_two_same (0, 1, et.size / 2);
13700 }
13701
/* VQSHRN/VQRSHRN: saturating shift right and narrow, Dd = Qm >> #imm,
   where 0 is folded into the VQMOVN pseudo-instruction.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Shift count is encoded as size - imm for right shifts.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
13728
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow with unsigned
   result; #0 is folded into the VQMOVUN pseudo-instruction.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1. */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
13758
13759 static void
13760 do_neon_movn (void)
13761 {
13762 struct neon_type_el et = neon_check_type (2, NS_DQ,
13763 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
13764 NEON_ENCODE (INTEGER, inst);
13765 neon_two_same (0, 1, et.size / 2);
13766 }
13767
/* VSHRN/VRSHRN: shift right and narrow, Dd = Qm >> #imm, where #0 is
   folded into the VMOVN pseudo-instruction.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as size - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
13792
/* VSHLL: shift left long, Qd = Dm << #imm.  A shift equal to the
   element size is a distinct "maximum shift" encoding; smaller shifts
   use the normal immediate-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant. */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions. */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
13822
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  The returned index is used to select opcode
   names and encoding bits in the VCVT encoders below; -1 means no flavour
   matched.  */

static int
neon_cvt_flavour (enum neon_shape rs)
{
  /* Try destination/source types X and Y; on a match, clear any pending
     type error and return flavour index C.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Flavours 0-3: Neon integer<->single conversions.  */
  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);
  /* Half-precision conversions. */
  CVT_VAR (4, N_F32, N_F16);
  CVT_VAR (5, N_F16, N_F32);

  whole_reg = N_VFP;

  /* VFP instructions. */
  CVT_VAR (6, N_F32, N_F64);
  CVT_VAR (7, N_F64, N_F32);
  CVT_VAR (8, N_S32, N_F64 | key);
  CVT_VAR (9, N_U32, N_F64 | key);
  CVT_VAR (10, N_F64 | key, N_S32);
  CVT_VAR (11, N_F64 | key, N_U32);
  /* VFP instructions with bitshift. */
  CVT_VAR (12, N_F32 | key, N_S16);
  CVT_VAR (13, N_F32 | key, N_U16);
  CVT_VAR (14, N_F64 | key, N_S16);
  CVT_VAR (15, N_F64 | key, N_U16);
  CVT_VAR (16, N_S16, N_F32 | key);
  CVT_VAR (17, N_U16, N_F32 | key);
  CVT_VAR (18, N_S16, N_F64 | key);
  CVT_VAR (19, N_U16, N_F64 | key);

  /* No flavour matched the given types.  */
  return -1;
#undef CVT_VAR
}
13875
13876 /* Neon-syntax VFP conversions. */
13877
13878 static void
13879 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13880 {
13881 const char *opname = 0;
13882
13883 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13884 {
13885 /* Conversions with immediate bitshift. */
13886 const char *enc[] =
13887 {
13888 "ftosls",
13889 "ftouls",
13890 "fsltos",
13891 "fultos",
13892 NULL,
13893 NULL,
13894 NULL,
13895 NULL,
13896 "ftosld",
13897 "ftould",
13898 "fsltod",
13899 "fultod",
13900 "fshtos",
13901 "fuhtos",
13902 "fshtod",
13903 "fuhtod",
13904 "ftoshs",
13905 "ftouhs",
13906 "ftoshd",
13907 "ftouhd"
13908 };
13909
13910 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13911 {
13912 opname = enc[flavour];
13913 constraint (inst.operands[0].reg != inst.operands[1].reg,
13914 _("operands 0 and 1 must be the same register"));
13915 inst.operands[1] = inst.operands[2];
13916 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13917 }
13918 }
13919 else
13920 {
13921 /* Conversions without bitshift. */
13922 const char *enc[] =
13923 {
13924 "ftosis",
13925 "ftouis",
13926 "fsitos",
13927 "fuitos",
13928 "NULL",
13929 "NULL",
13930 "fcvtsd",
13931 "fcvtds",
13932 "ftosid",
13933 "ftouid",
13934 "fsitod",
13935 "fuitod"
13936 };
13937
13938 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13939 opname = enc[flavour];
13940 }
13941
13942 if (opname)
13943 do_vfp_nsyn_opcode (opname);
13944 }
13945
/* Encode a round-towards-zero VCVT using the VFP "ftosiz"/"ftouiz"
   opcodes.  Flavours with no round-to-zero VFP form are NULL and are
   silently skipped.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  int flavour = neon_cvt_flavour (rs);
  /* Opcode names, indexed by flavour.  */
  const char *enc[] =
    {
      "ftosizs",
      "ftouizs",
      NULL,
      NULL,
      NULL,
      NULL,
      NULL,
      NULL,
      "ftosizd",
      "ftouizd"
    };

  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
13968
/* Shared encoder for VCVT/VCVTR.  ROUND_TO_ZERO is TRUE for plain VCVT,
   which uses the round-towards-zero VFP forms where they exist; VCVTR
   passes FALSE and uses the FPSCR rounding mode.  */

static void
do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions. */
  if (round_to_zero
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions. */
  if (flavour >= 6)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	unsigned immbits;
	/* Per-flavour opcode bits for the fixed-point forms.  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion. */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The fraction-bits field is encoded as 32 - imm.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != -1)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
    int_encode:
      {
	/* Per-flavour opcode bits for the integer forms.  */
	unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	NEON_ENCODE (INTEGER, inst);

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (flavour != -1)
	  inst.instruction |= enctab[flavour];

	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 2 << 18;

	neon_dp_fixup (&inst);
      }
    break;

    /* Half-precision conversions for Advanced SIMD -- neon. */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* Select the narrowing (DQ) or widening (QD) opcode.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
14084
14085 static void
14086 do_neon_cvtr (void)
14087 {
14088 do_neon_cvt_1 (FALSE);
14089 }
14090
14091 static void
14092 do_neon_cvt (void)
14093 {
14094 do_neon_cvt_1 (TRUE);
14095 }
14096
/* VCVTB: convert between half-precision (in the bottom half-word of an
   S register) and single-precision.  Encoded as a VFP instruction; bit
   16 is set when the half-precision operand is the source.  */

static void
do_neon_cvtb (void)
{
  inst.instruction = 0xeb20a40;

  /* The sizes are attached to the mnemonic. */
  if (inst.vectype.el[0].type != NT_invtype
      && inst.vectype.el[0].size == 16)
    inst.instruction |= 0x00010000;

  /* Programmer's syntax: the sizes are attached to the operands. */
  else if (inst.operands[0].vectype.type != NT_invtype
	   && inst.operands[0].vectype.size == 16)
    inst.instruction |= 0x00010000;

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
  do_vfp_cond_or_thumb ();
}
14116
14117
14118 static void
14119 do_neon_cvtt (void)
14120 {
14121 do_neon_cvtb ();
14122 inst.instruction |= 0x80;
14123 }
14124
/* Encode the immediate forms of VMOV/VMVN.  If the immediate cannot be
   represented directly, the value is inverted and the instruction
   flipped between VMOV and VMVN before trying again.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate is split by the parser: low word in imm, high
     word in reg (flagged by regisimm).  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the possibly-flipped MOV/MVN selector (bit 5).  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
14176
14177 static void
14178 do_neon_mvn (void)
14179 {
14180 if (inst.operands[1].isreg)
14181 {
14182 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14183
14184 NEON_ENCODE (INTEGER, inst);
14185 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14186 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14187 inst.instruction |= LOW4 (inst.operands[1].reg);
14188 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14189 inst.instruction |= neon_quad (rs) << 6;
14190 }
14191 else
14192 {
14193 NEON_ENCODE (IMMED, inst);
14194 neon_move_immediate ();
14195 }
14196
14197 neon_dp_fixup (&inst);
14198 }
14199
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |

   Shared by the long/wide/narrow three-register encoders.  ET is the
   checked instruction type: its signedness supplies the U bit.  SIZE is
   the element size in bits; its log2 goes into the size field.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14219
14220 static void
14221 do_neon_dyadic_long (void)
14222 {
14223 /* FIXME: Type checking for lengthening op. */
14224 struct neon_type_el et = neon_check_type (3, NS_QDD,
14225 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14226 neon_mixed_length (et, et.size);
14227 }
14228
14229 static void
14230 do_neon_abal (void)
14231 {
14232 struct neon_type_el et = neon_check_type (3, NS_QDD,
14233 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14234 neon_mixed_length (et, et.size);
14235 }
14236
14237 static void
14238 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14239 {
14240 if (inst.operands[2].isscalar)
14241 {
14242 struct neon_type_el et = neon_check_type (3, NS_QDS,
14243 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14244 NEON_ENCODE (SCALAR, inst);
14245 neon_mul_mac (et, et.type == NT_unsigned);
14246 }
14247 else
14248 {
14249 struct neon_type_el et = neon_check_type (3, NS_QDD,
14250 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14251 NEON_ENCODE (INTEGER, inst);
14252 neon_mixed_length (et, et.size);
14253 }
14254 }
14255
14256 static void
14257 do_neon_mac_maybe_scalar_long (void)
14258 {
14259 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
14260 }
14261
14262 static void
14263 do_neon_dyadic_wide (void)
14264 {
14265 struct neon_type_el et = neon_check_type (3, NS_QQD,
14266 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14267 neon_mixed_length (et, et.size);
14268 }
14269
14270 static void
14271 do_neon_dyadic_narrow (void)
14272 {
14273 struct neon_type_el et = neon_check_type (3, NS_QDD,
14274 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14275 /* Operand sign is unimportant, and the U bit is part of the opcode,
14276 so force the operand type to integer. */
14277 et.type = NT_integer;
14278 neon_mixed_length (et, et.size / 2);
14279 }
14280
14281 static void
14282 do_neon_mul_sat_scalar_long (void)
14283 {
14284 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
14285 }
14286
/* VMULL: multiply long.  The by-scalar form goes through the shared
   MAC encoder; the register form additionally accepts the polynomial
   P8 type.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
	 zero. Should be OK as-is. */
      neon_mixed_length (et, et.size);
    }
}
14305
/* VEXT: extract a window from the concatenation of two source
   registers.  The index operand is given in elements and converted to
   a byte offset for the encoding.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Byte offset: element index scaled by the element size.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The offset must stay within the 8-byte (D) or 16-byte (Q)
     register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
14327
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.
   The op field (bits 7-8) distinguishes the three mnemonics.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
14344
/* VDUP: duplicate either a scalar (Dm[x]) or an ARM core register into
   every lane of a Neon D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted to sit above the size bits in the
	 imm4 field.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector. */
      NEON_ENCODE (ARMREG, inst);
      /* Element size selects the B:E bits of the encoding.  */
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field. */
      do_vfp_cond_or_thumb ();
    }
}
14395
14396 /* VMOV has particularly many variations. It can be one of:
14397 0. VMOV<c><q> <Qd>, <Qm>
14398 1. VMOV<c><q> <Dd>, <Dm>
14399 (Register operations, which are VORR with Rm = Rn.)
14400 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14401 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14402 (Immediate loads.)
14403 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14404 (ARM register to scalar.)
14405 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14406 (Two ARM registers to vector.)
14407 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14408 (Scalar to ARM register.)
14409 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14410 (Vector to two ARM registers.)
14411 8. VMOV.F32 <Sd>, <Sm>
14412 9. VMOV.F64 <Dd>, <Dm>
14413 (VFP register moves.)
14414 10. VMOV.F32 <Sd>, #imm
14415 11. VMOV.F64 <Dd>, #imm
14416 (VFP float immediate load.)
14417 12. VMOV <Rd>, <Sm>
14418 (VFP single to ARM reg.)
14419 13. VMOV <Sd>, <Rm>
14420 (ARM reg to VFP single.)
14421 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14422 (Two ARM regs to two VFP singles.)
14423 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14424 (Two VFP singles to two ARM regs.)
14425
14426 These cases can be disambiguated using neon_select_shape, except cases 1/9
14427 and 3/11 which depend on the operand type too.
14428
14429 All the encoded bits are hardcoded by this function.
14430
14431 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14432 Cases 5, 7 may be used with VFPv2 and above.
14433
14434 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14435 can specify a type where it doesn't make sense to, and is ignored). */
14436
/* Encode every form of VMOV.  The parsed operand shape selects the
   numbered case from the table above; cases 1/9 and 3/11 additionally
   depend on the operand type.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given. */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VFP double-precision register move.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through. */

    case NS_QQ:  /* case 0/1. */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that. */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes Rn == Rm, so repeat the source in the Rn field.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd). */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through. */

    case NS_QI:  /* case 2/3. */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4. */
      {
	/* ARM core register to scalar: VMOV.<size> Dn[x], Rd.  */
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Element size selects the opc1:opc2 bit pattern.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6. */
      {
	/* Scalar to ARM core register: VMOV.<dt> Rd, Dn[x].  */
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Element size and signedness select the U:opc1:opc2 bits.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys). */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts). */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs). */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr). */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs). */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr). */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
14646
/* Encode Neon right-shift-by-immediate instructions (V{R}SHR, V{R}SRA
   and friends).  Accepts all signed/unsigned integer element sizes.
   A shift count of zero has no immediate encoding and is assembled as
   a plain VMOV instead.  */
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      /* Drop the shift operand so the VMOV encoder sees two operands.  */
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded in the imm6 field as (element size - count).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
14667
/* Encode VMOVL: lengthening move of each element of a D register into
   a Q register.  Source element size may be 8, 16 or 32 bits.  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  /* size >> 3 gives 1, 2 or 4; placed at bit 19 this sets exactly one
     of bits [21:19] to select the source element size.  */
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
14677
/* Encode VTRN: transpose elements of two vectors.  Element size may be
   8, 16 or 32 bits; signedness is irrelevant to the operation.  */
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14687
/* Encode VZIP / VUZP (zip and unzip vectors).  */
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  The 32-bit
	 doubleword forms of VZIP/VUZP have no encoding of their own,
	 but are equivalent to a transpose.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
14703
14704 static void
14705 do_neon_sat_abs_neg (void)
14706 {
14707 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14708 struct neon_type_el et = neon_check_type (2, rs,
14709 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14710 neon_two_same (neon_quad (rs), 1, et.size);
14711 }
14712
/* Encode VPADDL / VPADAL: pairwise add (and accumulate) long.  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
14722
/* Encode VRECPE / VRSQRTE reciprocal (square-root) estimate.  Both a
   float and an unsigned-integer flavour exist; bit 8 selects float.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
14732
14733 static void
14734 do_neon_cls (void)
14735 {
14736 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14737 struct neon_type_el et = neon_check_type (2, rs,
14738 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14739 neon_two_same (neon_quad (rs), 1, et.size);
14740 }
14741
14742 static void
14743 do_neon_clz (void)
14744 {
14745 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14746 struct neon_type_el et = neon_check_type (2, rs,
14747 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
14748 neon_two_same (neon_quad (rs), 1, et.size);
14749 }
14750
14751 static void
14752 do_neon_cnt (void)
14753 {
14754 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14755 struct neon_type_el et = neon_check_type (2, rs,
14756 N_EQK | N_INT, N_8 | N_KEY);
14757 neon_two_same (neon_quad (rs), 1, et.size);
14758 }
14759
14760 static void
14761 do_neon_swp (void)
14762 {
14763 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14764 neon_two_same (neon_quad (rs), 1, -1);
14765 }
14766
/* Encode VTBL / VTBX table lookup.  Operand 0 is the destination Dd,
   operand 1 is the list of 1-4 table registers (list length held in
   operands[1].imm), operand 2 is the index register Dm.  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* The list length is encoded minus one, in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
14790
/* Encode VLDM / VSTM for double-precision register lists.  Lists of
   single-precision registers are handed off to the VFP encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register transfers two words, so the offset field holds
     twice the register count.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
14820
14821 static void
14822 do_neon_ldr_str (void)
14823 {
14824 int is_ldr = (inst.instruction & (1 << 20)) != 0;
14825
14826 if (inst.operands[0].issingle)
14827 {
14828 if (is_ldr)
14829 do_vfp_nsyn_opcode ("flds");
14830 else
14831 do_vfp_nsyn_opcode ("fsts");
14832 }
14833 else
14834 {
14835 if (is_ldr)
14836 do_vfp_nsyn_opcode ("fldd");
14837 else
14838 do_vfp_nsyn_opcode ("fstd");
14839 }
14840 }
14841
14842 /* "interleave" version also handles non-interleaving register VLD1/VST1
14843 instructions. */
14844
/* Encode the interleaving forms of VLD<n>/VST<n> (which also cover
   plain multi-register VLD1/VST1).  Validates the alignment specifier
   against the register list length, then derives the "type" field from
   a lookup table keyed on list shape and <n>.  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment (in bits) was parsed into the top of operands[1].imm.
     Only 64/128/256 are representable, and 128/256 additionally
     require particular list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  /* Replace the provisional <n> in bits [9:8] with the real type field.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
14908
14909 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14910 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14911 otherwise. The variable arguments are a list of pairs of legal (size, align)
14912 values, terminated with -1. */
14913
/* Validate the parsed alignment ALIGN against the legal (size, align)
   pairs given as the variadic arguments, terminated by -1.  Returns
   SUCCESS/FAIL; on SUCCESS *DO_ALIGN says whether an alignment bit
   must actually be set (no alignment specified => 0).  */
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment was requested, so nothing to check or encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Walk the (size, align) pairs until a match or the -1 sentinel.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
14949
/* Encode single n-element structure to/from one lane forms of
   VLD<n>/VST<n>.  <n> minus one is taken from bits [9:8] of the
   initial bitmask.  Per-<n> alignment rules differ, hence the switch.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment in bits lives in the high part of operands[1].imm.  */
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  /* Highest valid lane index for this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      /* VLD3/VST3 to one lane have no alignment encoding at all.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
15034
15035 /* Encode single n-element structure to all lanes VLD<n> instructions. */
15036
/* Encode the "to all lanes" (duplicating) forms of VLD<n>.  <n> minus
   one comes from bits [9:8] of the initial bitmask; each <n> has its
   own alignment and list-length rules.  */
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A two-register list is encoded in the T bit (bit 5).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* VLD3 to all lanes has no alignment encoding.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* The 32-bit/128-bit-aligned case uses the otherwise-invalid
	   size field value 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
15108
15109 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
15111
/* Top-level encoder for VLD<n>/VST<n>: dispatch to the interleave,
   duplicate or single-lane encoder based on the lane specifier, then
   fill in the register, writeback/post-index and mode bits common to
   all forms.  */
static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* The Rm field encodes the addressing mode: a real register for
     register post-index, 0xd for writeback, 0xf for no writeback.  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      /* 0xd and 0xf are reserved for the fixed addressing modes.  */
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
15161 \f
15162 /* Overall per-instruction processing. */
15163
15164 /* We need to be able to fix up arbitrary expressions in some statements.
15165 This is so that we can handle symbols that are an arbitrary distance from
15166 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15167 which returns part of an address in a form which will be valid for
15168 a data instruction. We do this by pushing the expression into a symbol
15169 in the expr_section, and creating a fix for that. */
15170
/* Create a fixup for RELOC of SIZE bytes at WHERE in FRAG, resolving
   EXP.  Simple expressions are fixed up directly; anything more
   complex is pushed into an expression symbol first (see the comment
   above).  PC_REL is non-zero for pc-relative relocs.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* These forms the generic fixup machinery can resolve itself.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Wrap the expression in a symbol so it can be fixed up later.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
15201
/* Create a frag for an instruction requiring relaxation.  */
/* Emit the current (Thumb) instruction into a machine-dependent frag
   so it can later be relaxed to a wider encoding if its target turns
   out to be out of range.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the reloc expression into a symbol and an offset for
     frag_var; general expressions need a synthesized symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
15233
15234 /* Write a 32-bit thumb instruction to buf. */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  /* A 32-bit Thumb instruction is stored as two halfwords, the most
     significant halfword first; md_number_to_chars handles the byte
     order of each halfword for the target endianness.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
15241
/* Emit the fully-assembled instruction held in INST: report any
   accumulated error against the source line STR, otherwise write the
   encoding into the current frag and attach its relocation and DWARF
   line info.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions go through the variant-frag path.  */
      output_relax_insn ();
      return;
    }
  /* Some pseudo-instructions emit no bytes at all.  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* The only oversized ARM case: the same word emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
15288
15289 static char *
15290 output_it_inst (int cond, int mask, char * to)
15291 {
15292 unsigned long instruction = 0xbf00;
15293
15294 mask &= 0xf;
15295 instruction |= mask;
15296 instruction |= cond << 4;
15297
15298 if (to == NULL)
15299 {
15300 to = frag_more (2);
15301 #ifdef OBJ_ELF
15302 dwarf2_emit_insn (2);
15303 #endif
15304 }
15305
15306 md_number_to_chars (to, instruction, 2);
15307
15308 return to;
15309 }
15310
15311 /* Tag values used in struct asm_opcode's tag field. */
/* NB: OT_odd_infix_0 must remain the last enumerator; opcode_lookup
   computes the infix position as (tag - OT_odd_infix_0), so any value
   at or above it encodes a position rather than a distinct tag.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
15344
15345 /* Subroutine of md_assemble, responsible for looking up the primary
15346 opcode from the mnemonic the user wrote. STR points to the
15347 beginning of the mnemonic.
15348
15349 This is not simply a hash table lookup, because of conditional
15350 variants. Most instructions have conditional variants, which are
15351 expressed with a _conditional affix_ to the mnemonic. If we were
15352 to encode each conditional variant as a literal string in the opcode
15353 table, it would have approximately 20,000 entries.
15354
15355 Most mnemonics take this affix as a suffix, and in unified syntax,
15356 'most' is upgraded to 'all'. However, in the divided syntax, some
15357 instructions take the affix as an infix, notably the s-variants of
15358 the arithmetic instructions. Of those instructions, all but six
15359 have the infix appear after the third character of the mnemonic.
15360
15361 Accordingly, the algorithm for looking up primary opcodes given
15362 an identifier is:
15363
15364 1. Look up the identifier in the opcode table.
15365 If we find a match, go to step U.
15366
15367 2. Look up the last two characters of the identifier in the
15368 conditions table. If we find a match, look up the first N-2
15369 characters of the identifier in the opcode table. If we
15370 find a match, go to step CE.
15371
15372 3. Look up the fourth and fifth characters of the identifier in
15373 the conditions table. If we find a match, extract those
15374 characters from the identifier, and look up the remaining
15375 characters in the opcode table. If we find a match, go
15376 to step CM.
15377
15378 4. Fail.
15379
15380 U. Examine the tag field of the opcode structure, in case this is
15381 one of the six instructions with its conditional infix in an
15382 unusual place. If it is, the tag tells us where to find the
15383 infix; look it up in the conditions table and set inst.cond
15384 accordingly. Otherwise, this is an unconditional instruction.
15385 Again set inst.cond accordingly. Return the opcode structure.
15386
15387 CE. Examine the tag field to make sure this is an instruction that
15388 should receive a conditional suffix. If it is not, fail.
15389 Otherwise, set inst.cond from the suffix we already looked up,
15390 and return the opcode structure.
15391
15392 CM. Examine the tag field to make sure this is an instruction that
15393 should receive a conditional infix after the third character.
15394 If it is not, fail. Otherwise, undo the edits to the current
15395 line of input and proceed as for case CE. */
15396
/* Look up the mnemonic at *STR in the opcode table, handling width
   suffixes (.w/.n), Neon type suffixes, and conditional suffixes and
   infixes.  On success *STR is advanced past the mnemonic, inst.cond
   (and possibly inst.size_req / inst.vectype) are set, and the opcode
   entry is returned; otherwise returns NULL.  See the algorithm
   description above.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* One of the rare instructions with its condition infixed at an
	 unusual position; the tag tells us where.  */
      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* A conditional suffix is two characters, so a suffixed mnemonic
     must be at least three characters long (one for the base).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the two infix characters out of the buffer so
     the remainder can be looked up, then restore the input line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
15553
15554 /* This function generates an initial IT instruction, leaving its block
15555 virtually open for the new instructions. Eventually,
15556 the mask will be updated by now_it_add_mask () each time
15557 a new instruction needs to be included in the IT block.
15558 Finally, the block is closed with close_automatic_it_block ().
15559 The block closure can be requested either from md_assemble (),
15560 a tencode (), or due to a label hook. */
15561
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a block of length one; now_it_add_mask () refines
     it as further instructions join the block.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction was written so it can be
     patched in place as the block grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
}
15572
15573 /* Close an automatic IT block.
15574 See comments in new_automatic_it_block (). */
15575
static void
close_automatic_it_block (void)
{
  /* Reset to the empty-block state; the emitted IT instruction keeps
     whatever mask it last had.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
15582
15583 /* Update the mask of the current automatically-generated IT
15584 instruction. See comments in new_automatic_it_block (). */
15585
/* Fold the condition COND of the newest instruction into the mask of
   the open automatic IT block and rewrite the IT instruction in
   place.  The bit at position (5 - block_length) records whether COND
   matches the block condition (its low bit), and the bit below it is
   set to mark the new end of the mask.  */
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
	  | ((bitvalue) << (nbit)))
  /* Low bit of the condition distinguishes "then" from "else".  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* Mark the new terminating bit of the mask.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Patch the previously-emitted IT instruction in place.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
15606
15607 /* The IT blocks handling machinery is accessed through the these functions:
15608 it_fsm_pre_encode () from md_assemble ()
15609 set_it_insn_type () optional, from the tencode functions
15610 set_it_insn_type_last () ditto
15611 in_it_block () ditto
15612 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
15614
15615 Rationale:
15616 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15617 initializing the IT insn type with a generic initial value depending
15618 on the inst.condition.
15619 2) During the tencode function, two things may happen:
15620 a) The tencode function overrides the IT insn type by
15621 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15622 b) The tencode function queries the IT block state by
15623 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15624
15625 Both set_it_insn_type and in_it_block run the internal FSM state
15626 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
15628 and b) querying the state requires the FSM to be updated.
15629 Specifically we want to avoid creating an IT block for conditional
15630 branches, so it_fsm_pre_encode is actually a guess and we can't
15631 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
15633 Because of this, if set_it_insn_type and in_it_block have to be used,
15634 set_it_insn_type has to be called first.
15635
15636 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15637 determines the insn IT type depending on the inst.cond code.
15638 When a tencode () routine encodes an instruction that can be
15639 either outside an IT block, or, in the case of being inside, has to be
15640 the last one, set_it_insn_type_last () will determine the proper
15641 IT instruction type based on the inst.cond code. Otherwise,
15642 set_it_insn_type can be called for overriding that logic or
15643 for covering other cases.
15644
15645 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15647 still queried. Instead, if the FSM determines that the state should
15648 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15649 after the tencode () function: that's what it_fsm_post_encode () does.
15650
15651 Since in_it_block () calls the state handling function to get an
15652 updated state, an error may occur (due to invalid insns combination).
15653 In that case, inst.error is set.
15654 Therefore, inst.error has to be checked after the execution of
15655 the tencode () routine.
15656
15657 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15658 any pending state change (if any) that didn't take place in
15659 handle_it_state () as explained above. */
15660
15661 static void
15662 it_fsm_pre_encode (void)
15663 {
15664 if (inst.cond != COND_ALWAYS)
15665 inst.it_insn_type = INSIDE_IT_INSN;
15666 else
15667 inst.it_insn_type = OUTSIDE_IT_INSN;
15668
15669 now_it.state_handled = 0;
15670 }
15671
/* IT state FSM handling function.  Run once per instruction — from
   set_it_insn_type (), in_it_block () or it_fsm_post_encode () — to
   reconcile the instruction's IT type (inst.it_insn_type) with the
   current IT block state (now_it.state).  Returns SUCCESS, or FAIL
   with inst.error set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn in unified syntax unless implicit-IT covers ARM.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Close and possibly reopen when the block is full or the
	     condition no longer fits the open block.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    /* Neutral insns take the block's own condition bit.  */
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  Derive the condition expected
	   in this slot from the block condition and the top mask bit,
	   then advance the mask by one slot.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
15831
15832 static void
15833 it_fsm_post_encode (void)
15834 {
15835 int is_last;
15836
15837 if (!now_it.state_handled)
15838 handle_it_state ();
15839
15840 is_last = (now_it.mask == 0x10);
15841 if (is_last)
15842 {
15843 now_it.state = OUTSIDE_IT_BLOCK;
15844 now_it.mask = 0;
15845 }
15846 }
15847
15848 static void
15849 force_automatic_it_block_close (void)
15850 {
15851 if (now_it.state == AUTOMATIC_IT_BLOCK)
15852 {
15853 close_automatic_it_block ();
15854 now_it.state = OUTSIDE_IT_BLOCK;
15855 now_it.mask = 0;
15856 }
15857 }
15858
15859 static int
15860 in_it_block (void)
15861 {
15862 if (!now_it.state_handled)
15863 handle_it_state ();
15864
15865 return now_it.state != OUTSIDE_IT_BLOCK;
15866 }
15867
/* Assemble one source line STR: look up the mnemonic, parse the
   operands and emit either a Thumb or an ARM encoding depending on
   the current mode.  This is the md_assemble entry point called by
   the generic GAS machinery for every instruction statement.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset per-instruction state before parsing.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicit require narrow instructions on Thumb-1.
		 This avoids relaxation accidentally introducing Thumb-2
		 instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Encodings in [0xe800, 0xffff] would be ambiguous between a
	     16-bit insn and the first half of a 32-bit one.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
16052
/* Diagnose IT blocks left open at the end of assembly: warn for each
   ELF section (each carries its own IT state in tc_segment_info_data)
   or, on non-ELF targets, for the single global IT state.  */

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
16071
16072 /* Various frobbings of labels and their addresses. */
16073
/* Hook run at the start of each input line: forget the label seen on
   the previous line, so md_assemble () only re-aligns a label defined
   on the current line.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
16079
/* Hook run whenever a label SYM is defined: remember it for alignment
   in md_assemble (), tag it with the current Thumb/interwork state,
   close any open automatic IT block, optionally mark it as a Thumb
   function, and emit its DWARF line info.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label ends any IT block we were building automatically.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
16138
16139 bfd_boolean
16140 arm_data_in_code (void)
16141 {
16142 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16143 {
16144 *input_line_pointer = '/';
16145 input_line_pointer += 5;
16146 *input_line_pointer = 0;
16147 return TRUE;
16148 }
16149
16150 return FALSE;
16151 }
16152
16153 char *
16154 arm_canonicalize_symbol_name (char * name)
16155 {
16156 int len;
16157
16158 if (thumb_mode && (len = strlen (name)) > 5
16159 && streq (name + len - 5, "/data"))
16160 *(name + len - 5) = 0;
16161
16162 return name;
16163 }
16164 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Helper macros for building reg_names[]:
   REGDEF(s,n,t)   - one entry: name S, number N, type REG_TYPE_T;
   REGNUM(p,n,t)   - entry whose name is the prefix P glued to N;
   REGNUM2(p,n,t)  - like REGNUM but the register number is 2*N
		     (used for Neon Q registers, which map onto
		     even-numbered D registers);
   REGSET/REGSETH  - runs of 16 numbered entries (0-15 / 16-31);
   REGSET2	   - a run of 16 REGNUM2 entries.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.	 */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
16278
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each flag combination
   is listed explicitly, all mapping to the same PSR_* bit mask.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  {"g",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},

  /* APSR flags */
  {"nzcvq",  PSR_f},
  {"nzcvqg", PSR_s | PSR_f}
};
16362
/* Table of V7M psr names.  Entries are listed in lower and upper case
   (plus "xPSR"); the value is the special-register field encoding.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
16382
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
16393
/* Table of all explicit relocation names, mapping the operand prefix
   (e.g. ":got:") to the corresponding BFD relocation.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL}
};
#endif
16412
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  "hs" is a synonym for "cs"; "ul" and "lo" for "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
16432
/* Table of barrier option names, each with its 4-bit option field
   value.  Several names alias the same value (e.g. "sh"/"ish",
   "un"/"nsh").  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",    0xf }, { "SY",    0xf },
  { "un",    0x7 }, { "UN",    0x7 },
  { "st",    0xe }, { "ST",    0xe },
  { "unst",  0x6 }, { "UNST",  0x6 },
  { "ish",   0xb }, { "ISH",   0xb },
  { "sh",    0xb }, { "SH",    0xb },
  { "ishst", 0xa }, { "ISHST", 0xa },
  { "shst",  0xa }, { "SHST",  0xa },
  { "nsh",   0x7 }, { "NSH",   0x7 },
  { "nshst", 0x6 }, { "NSHST", 0x6 },
  { "osh",   0x3 }, { "OSH",   0x3 },
  { "oshst", 0x2 }, { "OSHST", 0x2 }
};
16448
16449 /* Table of ARM-format instructions. */
16450
16451 /* Macros for gluing together operand strings. N.B. In all cases
16452 other than OPS0, the trailing OP_stop comes from default
16453 zero-initialization of the unspecified elements of the array. */
16454 #define OPS0() { OP_stop, }
16455 #define OPS1(a) { OP_##a, }
16456 #define OPS2(a,b) { OP_##a,OP_##b, }
16457 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
16458 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
16459 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
16460 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16461
16462 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
16463 This is useful when mixing operands for ARM and THUMB, i.e. using the
16464 MIX_ARM_THUMB_OPERANDS macro.
16465 In order to use these macros, prefix the number of operands with _
16466 e.g. _3. */
16467 #define OPS_1(a) { a, }
16468 #define OPS_2(a,b) { a,b, }
16469 #define OPS_3(a,b,c) { a,b,c, }
16470 #define OPS_4(a,b,c,d) { a,b,c,d, }
16471 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
16472 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
16473
16474 /* These macros abstract out the exact format of the mnemonic table and
16475 save some repeated characters. */
16476
16477 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
16478 #define TxCE(mnem, op, top, nops, ops, ae, te) \
16479 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
16480 THUMB_VARIANT, do_##ae, do_##te }
16481
16482 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
16483 a T_MNEM_xyz enumerator. */
16484 #define TCE(mnem, aop, top, nops, ops, ae, te) \
16485 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
16486 #define tCE(mnem, aop, top, nops, ops, ae, te) \
16487 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16488
16489 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16490 infix after the third character. */
16491 #define TxC3(mnem, op, top, nops, ops, ae, te) \
16492 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16493 THUMB_VARIANT, do_##ae, do_##te }
16494 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
16495 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16496 THUMB_VARIANT, do_##ae, do_##te }
16497 #define TC3(mnem, aop, top, nops, ops, ae, te) \
16498 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
16499 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
16500 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
16501 #define tC3(mnem, aop, top, nops, ops, ae, te) \
16502 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16503 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
16504 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16505
/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.
   m1 is the mnemonic prefix, m2 the condition-code infix (a bare token,
   stringized here with #m2), m3 the suffix.  When m2 is empty,
   sizeof (#m2) is 1 (just the NUL of ""), selecting the unconditional
   tag OT_odd_infix_unc; otherwise OT_odd_infix_0 + sizeof (m1) - 1
   records the character position of the infix within the mnemonic.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand to one opcode-table entry per condition code, including the
   legacy aliases (hs for cs; ul/lo for cc).  The first expansion, with
   an empty infix, is the plain unconditional spelling.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

/* Numeric Thumb opcode vs. T_MNEM_xyz enumerator, as for TCE/tCE.  */
#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
16537
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field (OT_unconditionalF, as opposed to
   OT_unconditional above).  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
16550
/* ARM-only variants of all the above.  The Thumb opcode field is 0 and
   the Thumb encoder is NULL.  Note: CE and CL take the mnemonic as a
   string literal, whereas C3 takes a bare identifier and stringizes it
   itself (#mnem).  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16563
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb encoding (0xe##op) is the ARM encoding with the always (0xE)
   condition pasted into the top nibble, and both modes share the same
   encoding function.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16580
/* ARM-only analogue of TxCM_: odd-position conditional infix, no Thumb
   variant.  m2 is the condition infix (stringized); an empty m2 gives
   sizeof (#m2) == 1 and selects the unconditional tag, otherwise the
   tag encodes the infix position from the prefix length.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* One entry per condition code (including the hs and ul/lo aliases),
   plus the plain unconditional spelling first.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
16606
/* ARM-only, unconditional (condition field 0xE); mnemonic is a bare
   identifier, stringized here.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* As UE, but bearing 0xF in the ARM condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16612
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  The opcode fields hold
   N_MNEM_xyz enumerators instead of literal encodings.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The tag argument distinguishes plain conditional suffix
   (OT_csuffix) from the F variant (OT_csuffixF) below.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op,				\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.
   As NCE_tag, but indirecting through N_MNEM_xyz enumerators.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16648
16649 #define do_0 0
16650
16651 static const struct asm_opcode insns[] =
16652 {
16653 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16654 #define THUMB_VARIANT &arm_ext_v4t
16655 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
16656 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
16657 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
16658 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
16659 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
16660 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
16661 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
16662 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
16663 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
16664 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
16665 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
16666 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
16667 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
16668 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
16669 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
16670 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
16671
16672 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16673 for setting PSR flag bits. They are obsolete in V6 and do not
16674 have Thumb equivalents. */
16675 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16676 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16677 CL("tstp", 110f000, 2, (RR, SH), cmp),
16678 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16679 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16680 CL("cmpp", 150f000, 2, (RR, SH), cmp),
16681 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16682 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16683 CL("cmnp", 170f000, 2, (RR, SH), cmp),
16684
16685 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
16686 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
16687 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
16688 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
16689
16690 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
16691 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
16692 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
16693 OP_RRnpc),
16694 OP_ADDRGLDR),ldst, t_ldst),
16695 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
16696
16697 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16698 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16699 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16700 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16701 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16702 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16703
16704 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
16705 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
16706 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
16707 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
16708
16709 /* Pseudo ops. */
16710 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
16711 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
16712 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
16713
16714 /* Thumb-compatibility pseudo ops. */
16715 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
16716 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
16717 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
16718 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
16719 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
16720 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
16721 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
16722 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
16723 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
16724 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
16725 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
16726 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
16727
16728 /* These may simplify to neg. */
16729 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
16730 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
16731
16732 #undef THUMB_VARIANT
16733 #define THUMB_VARIANT & arm_ext_v6
16734
16735 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
16736
16737 /* V1 instructions with no Thumb analogue prior to V6T2. */
16738 #undef THUMB_VARIANT
16739 #define THUMB_VARIANT & arm_ext_v6t2
16740
16741 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16742 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16743 CL("teqp", 130f000, 2, (RR, SH), cmp),
16744
16745 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16746 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16747 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
16748 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16749
16750 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16751 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16752
16753 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16754 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16755
16756 /* V1 instructions with no Thumb analogue at all. */
16757 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
16758 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
16759
16760 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
16761 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
16762 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
16763 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
16764 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
16765 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
16766 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
16767 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
16768
16769 #undef ARM_VARIANT
16770 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16771 #undef THUMB_VARIANT
16772 #define THUMB_VARIANT & arm_ext_v4t
16773
16774 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16775 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16776
16777 #undef THUMB_VARIANT
16778 #define THUMB_VARIANT & arm_ext_v6t2
16779
16780 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16781 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
16782
16783 /* Generic coprocessor instructions. */
16784 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16785 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16786 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16787 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16788 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16789 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16790 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
16791
16792 #undef ARM_VARIANT
16793 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16794
16795 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16796 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16797
16798 #undef ARM_VARIANT
16799 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16800 #undef THUMB_VARIANT
16801 #define THUMB_VARIANT & arm_ext_msr
16802
16803 TCE("mrs", 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
16804 TCE("msr", 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
16805
16806 #undef ARM_VARIANT
16807 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16808 #undef THUMB_VARIANT
16809 #define THUMB_VARIANT & arm_ext_v6t2
16810
16811 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16812 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16813 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16814 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16815 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16816 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16817 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16818 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16819
16820 #undef ARM_VARIANT
16821 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16822 #undef THUMB_VARIANT
16823 #define THUMB_VARIANT & arm_ext_v4t
16824
16825 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16826 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16827 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16828 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16829 tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16830 tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16831
16832 #undef ARM_VARIANT
16833 #define ARM_VARIANT & arm_ext_v4t_5
16834
16835 /* ARM Architecture 4T. */
16836 /* Note: bx (and blx) are required on V5, even if the processor does
16837 not support Thumb. */
16838 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
16839
16840 #undef ARM_VARIANT
16841 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16842 #undef THUMB_VARIANT
16843 #define THUMB_VARIANT & arm_ext_v5t
16844
16845 /* Note: blx has 2 variants; the .value coded here is for
16846 BLX(2). Only this variant has conditional execution. */
16847 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
16848 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
16849
16850 #undef THUMB_VARIANT
16851 #define THUMB_VARIANT & arm_ext_v6t2
16852
16853 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
16854 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16855 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16856 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16857 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16858 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16859 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16860 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16861
16862 #undef ARM_VARIANT
16863 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16864 #undef THUMB_VARIANT
16865 #define THUMB_VARIANT &arm_ext_v5exp
16866
16867 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16868 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16869 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16870 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16871
16872 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16873 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16874
16875 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16876 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16877 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16878 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16879
16880 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16881 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16882 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16883 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16884
16885 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16886 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16887
16888 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16889 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16890 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16891 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16892
16893 #undef ARM_VARIANT
16894 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16895 #undef THUMB_VARIANT
16896 #define THUMB_VARIANT &arm_ext_v6t2
16897
16898 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
16899 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
16900 ldrd, t_ldstd),
16901 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
16902 ADDRGLDRS), ldrd, t_ldstd),
16903
16904 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16905 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16906
16907 #undef ARM_VARIANT
16908 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16909
16910 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
16911
16912 #undef ARM_VARIANT
16913 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16914 #undef THUMB_VARIANT
16915 #define THUMB_VARIANT & arm_ext_v6
16916
16917 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
16918 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
16919 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16920 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16921 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16922 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16923 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16924 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16925 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16926 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
16927
16928 #undef THUMB_VARIANT
16929 #define THUMB_VARIANT & arm_ext_v6t2
16930
16931 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
16932 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
16933 strex, t_strex),
16934 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16935 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16936
16937 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
16938 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
16939
16940 /* ARM V6 not included in V7M. */
16941 #undef THUMB_VARIANT
16942 #define THUMB_VARIANT & arm_ext_v6_notm
16943 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16944 UF(rfeib, 9900a00, 1, (RRw), rfe),
16945 UF(rfeda, 8100a00, 1, (RRw), rfe),
16946 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16947 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16948 UF(rfefa, 9900a00, 1, (RRw), rfe),
16949 UF(rfeea, 8100a00, 1, (RRw), rfe),
16950 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16951 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
16952 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
16953 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
16954 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
16955
16956 /* ARM V6 not included in V7M (eg. integer SIMD). */
16957 #undef THUMB_VARIANT
16958 #define THUMB_VARIANT & arm_ext_v6_dsp
16959 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
16960 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
16961 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
16962 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16963 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16964 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16965 /* Old name for QASX. */
16966 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16967 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16968 /* Old name for QSAX. */
16969 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16970 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16971 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16972 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16973 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16974 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16975 /* Old name for SASX. */
16976 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16977 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16978 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16979 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16980 /* Old name for SHASX. */
16981 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16982 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16983 /* Old name for SHSAX. */
16984 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16985 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16986 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16987 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16988 /* Old name for SSAX. */
16989 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16990 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16991 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16992 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16993 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16994 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16995 /* Old name for UASX. */
16996 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16997 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16998 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16999 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17000 /* Old name for UHASX. */
17001 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17002 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17003 /* Old name for UHSAX. */
17004 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17005 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17006 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17007 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17008 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17009 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17010 /* Old name for UQASX. */
17011 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17012 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17013 /* Old name for UQSAX. */
17014 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17015 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17016 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17017 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17018 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17019 /* Old name for USAX. */
17020 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17021 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17022 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17023 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17024 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17025 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17026 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17027 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17028 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17029 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17030 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17031 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17032 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17033 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17034 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17035 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17036 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17037 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17038 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17039 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17040 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17041 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17042 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17043 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17044 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17045 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17046 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17047 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17048 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17049 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
17050 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
17051 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17052 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17053 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
17054
17055 #undef ARM_VARIANT
17056 #define ARM_VARIANT & arm_ext_v6k
17057 #undef THUMB_VARIANT
17058 #define THUMB_VARIANT & arm_ext_v6k
17059
17060 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
17061 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
17062 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
17063 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
17064
17065 #undef THUMB_VARIANT
17066 #define THUMB_VARIANT & arm_ext_v6_notm
17067 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
17068 ldrexd, t_ldrexd),
17069 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
17070 RRnpcb), strexd, t_strexd),
17071
17072 #undef THUMB_VARIANT
17073 #define THUMB_VARIANT & arm_ext_v6t2
17074 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
17075 rd_rn, rd_rn),
17076 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
17077 rd_rn, rd_rn),
17078 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17079 strex, rm_rd_rn),
17080 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17081 strex, rm_rd_rn),
17082 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
17083
17084 #undef ARM_VARIANT
17085 #define ARM_VARIANT & arm_ext_sec
17086 #undef THUMB_VARIANT
17087 #define THUMB_VARIANT & arm_ext_sec
17088
17089 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
17090
17091 #undef ARM_VARIANT
17092 #define ARM_VARIANT & arm_ext_v6t2
17093 #undef THUMB_VARIANT
17094 #define THUMB_VARIANT & arm_ext_v6t2
17095
17096 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
17097 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
17098 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17099 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17100
17101 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17102 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
17103 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
17104 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
17105
17106 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17107 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17108 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17109 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17110
17111 /* Thumb-only instructions. */
17112 #undef ARM_VARIANT
17113 #define ARM_VARIANT NULL
17114 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
17115 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
17116
17117 /* ARM does not really have an IT instruction, so always allow it.
17118 The opcode is copied from Thumb in order to allow warnings in
17119 -mimplicit-it=[never | arm] modes. */
17120 #undef ARM_VARIANT
17121 #define ARM_VARIANT & arm_ext_v1
17122
17123 TUE("it", bf08, bf08, 1, (COND), it, t_it),
17124 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
17125 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
17126 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
17127 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
17128 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
17129 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
17130 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
17131 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
17132 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
17133 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
17134 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
17135 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
17136 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
17137 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
17138 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
17139 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17140 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17141
17142 /* Thumb2 only instructions. */
17143 #undef ARM_VARIANT
17144 #define ARM_VARIANT NULL
17145
17146 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17147 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17148 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
17149 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
17150 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
17151 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
17152
17153  /* Hardware division instructions.  Note the two encodings are gated on
	  different feature bits: the ARM encoding requires the arm_ext_adiv
	  extension, while the Thumb encoding requires arm_ext_div.  */
17154 #undef  ARM_VARIANT
17155 #define ARM_VARIANT  & arm_ext_adiv
17156 #undef  THUMB_VARIANT
17157 #define THUMB_VARIANT  & arm_ext_div
17158
17159  TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
17160  TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
17161
17162 /* ARM V6M/V7 instructions. */
17163 #undef ARM_VARIANT
17164 #define ARM_VARIANT & arm_ext_barrier
17165 #undef THUMB_VARIANT
17166 #define THUMB_VARIANT & arm_ext_barrier
17167
17168 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, t_barrier),
17169 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, t_barrier),
17170 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, t_barrier),
17171
17172 /* ARM V7 instructions. */
17173 #undef ARM_VARIANT
17174 #define ARM_VARIANT & arm_ext_v7
17175 #undef THUMB_VARIANT
17176 #define THUMB_VARIANT & arm_ext_v7
17177
17178 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
17179 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
17180
17181 #undef ARM_VARIANT
17182 #define ARM_VARIANT & arm_ext_mp
17183 #undef THUMB_VARIANT
17184 #define THUMB_VARIANT & arm_ext_mp
17185
17186 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
17187
17188 #undef ARM_VARIANT
17189 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
17190
17191 cCE("wfs", e200110, 1, (RR), rd),
17192 cCE("rfs", e300110, 1, (RR), rd),
17193 cCE("wfc", e400110, 1, (RR), rd),
17194 cCE("rfc", e500110, 1, (RR), rd),
17195
17196 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
17197 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
17198 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
17199 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
17200
17201 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
17202 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
17203 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
17204 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
17205
17206 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
17207 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
17208 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
17209 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
17210 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
17211 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
17212 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
17213 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
17214 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
17215 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
17216 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
17217 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
17218
17219 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
17220 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
17221 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
17222 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
17223 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
17224 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
17225 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
17226 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
17227 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
17228 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
17229 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
17230 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
17231
17232 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
17233 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
17234 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
17235 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
17236 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
17237 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
17238 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
17239 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
17240 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
17241 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
17242 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
17243 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
17244
17245 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
17246 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
17247 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
17248 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
17249 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
17250 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
17251 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
17252 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
17253 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
17254 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
17255 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
17256 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
17257
17258 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
17259 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
17260 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
17261 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
17262 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
17263 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
17264 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
17265 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
17266 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
17267 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
17268 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
17269 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
17270
17271 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
17272 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
17273 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
17274 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
17275 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
17276 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
17277 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
17278 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
17279 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
17280 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
17281 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
17282 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
17283
17284 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
17285 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
17286 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
17287 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
17288 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
17289 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
17290 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
17291 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
17292 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
17293 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
17294 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
17295 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
17296
17297  cCL("exps",	e708100, 2, (RF, RF_IF),	      rd_rm),
17298  cCL("expsp",	e708120, 2, (RF, RF_IF),	      rd_rm),
17299  cCL("expsm",	e708140, 2, (RF, RF_IF),	      rd_rm),
17300  cCL("expsz",	e708160, 2, (RF, RF_IF),	      rd_rm),
17301  cCL("expd",	e708180, 2, (RF, RF_IF),	      rd_rm),
17302  cCL("expdp",	e7081a0, 2, (RF, RF_IF),	      rd_rm),
17303  cCL("expdm",	e7081c0, 2, (RF, RF_IF),	      rd_rm),
17304  cCL("expdz",	e7081e0, 2, (RF, RF_IF),	      rd_rm),
17305  cCL("expe",	e788100, 2, (RF, RF_IF),	      rd_rm),
17306  cCL("expep",	e788120, 2, (RF, RF_IF),	      rd_rm),
17307  cCL("expem",	e788140, 2, (RF, RF_IF),	      rd_rm),
	/* Was mistyped "expdz", which duplicated the entry above and made
	   the extended-precision zero-rounding variant unassemblable.  */
17308  cCL("expez",	e788160, 2, (RF, RF_IF),	      rd_rm),
17309
17310 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
17311 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
17312 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
17313 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
17314 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
17315 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
17316 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
17317 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
17318 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
17319 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
17320 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
17321 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
17322
17323 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
17324 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
17325 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
17326 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
17327 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
17328 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
17329 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
17330 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
17331 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
17332 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
17333 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
17334 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
17335
17336 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
17337 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
17338 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
17339 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
17340 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
17341 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
17342 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
17343 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
17344 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
17345 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
17346 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
17347 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
17348
17349 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
17350 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
17351 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
17352 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
17353 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
17354 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
17355 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
17356 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
17357 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
17358 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
17359 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
17360 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
17361
17362 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
17363 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
17364 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
17365 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
17366 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
17367 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
17368 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
17369 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
17370 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
17371 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
17372 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
17373 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
17374
17375 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
17376 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
17377 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
17378 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
17379 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
17380 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
17381 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
17382 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
17383 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
17384 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
17385 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
17386 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
17387
17388 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
17389 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
17390 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
17391 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
17392 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
17393 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
17394 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
17395 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
17396 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
17397 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
17398 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
17399 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
17400
17401 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
17402 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
17403 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
17404 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
17405 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
17406 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
17407 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
17408 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
17409 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
17410 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
17411 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
17412 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
17413
17414 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17415 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17416 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17417 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17418 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17419 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17420 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17421 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17422 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17423 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17424 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17425 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17426
17427 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17428 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17429 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17430 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17431 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17432 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17433 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17434 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17435 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17436 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17437 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17438 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17439
17440 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17441 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17442 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17443 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17444 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17445 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17446 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17447 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17448 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17449 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17450 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17451 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17452
17453 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17454 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17455 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17456 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17457 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17458 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17459 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17460 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17461 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17462 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17463 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17464 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17465
17466 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17467 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17468 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17469 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17470 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17471 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17472 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17473 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17474 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17475 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17476 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17477 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17478
17479 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17480 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17481 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17482 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17483 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17484 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17485 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17486 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17487 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17488 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17489 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17490 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17491
17492 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17493 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17494 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17495 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17496 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17497 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17498 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17499 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17500 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17501 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17502 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17503 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17504
17505 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17506 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17507 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17508 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17509 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17510 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17511 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17512 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17513 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17514 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17515 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
17516 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
17517
17518 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
17519 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
17520 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
17521 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
17522 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
17523 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17524 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17525 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17526 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
17527 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
17528 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
17529 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
17530
17531 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
17532 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17533 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17534 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17535 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17536 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17537 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17538 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17539 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
17540 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
17541 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
17542 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
17543
17544 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17545 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17546 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17547 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17548 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17549 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17550 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17551 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17552 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17553 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17554 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17555 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17556
17557 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17558 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17559 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17560 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17561 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17562 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17563 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17564 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17565 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17566 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17567 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17568 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17569
17570 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17571 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17572 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17573 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17574 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17575 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17576 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17577 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17578 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17579 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17580 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17581 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17582
17583 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
17584 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
17585 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
17586 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
17587
17588 cCL("flts", e000110, 2, (RF, RR), rn_rd),
17589 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
17590 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
17591 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
17592 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
17593 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
17594 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
17595 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
17596 cCL("flte", e080110, 2, (RF, RR), rn_rd),
17597 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
17598 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
17599 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
17600
17601 /* The implementation of the FIX instruction is broken on some
17602 assemblers, in that it accepts a precision specifier as well as a
17603 rounding specifier, despite the fact that this is meaningless.
17604 To be more compatible, we accept it as well, though of course it
17605 does not set any bits. */
17606 cCE("fix", e100110, 2, (RR, RF), rd_rm),
17607 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
17608 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
17609 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
17610 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
17611 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
17612 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
17613 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
17614 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
17615 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
17616 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
17617 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
17618 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
17619
17620 /* Instructions that were new with the real FPA, call them V2. */
17621 #undef ARM_VARIANT
17622 #define ARM_VARIANT & fpu_fpa_ext_v2
17623
17624 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17625 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17626 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17627 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17628 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17629 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17630
17631 #undef ARM_VARIANT
17632 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17633
17634 /* Moves and type conversions. */
17635 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
17636 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
17637 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
17638 cCE("fmstat", ef1fa10, 0, (), noargs),
17639 cCE("vmrs", ef10a10, 2, (APSR_RR, RVC), vmrs),
17640 cCE("vmsr", ee10a10, 2, (RVC, RR), vmsr),
17641 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
17642 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
17643 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
17644 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17645 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
17646 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17647 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
17648 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
17649
17650 /* Memory operations. */
17651 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17652 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17653 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
17654 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
17655 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
17656 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
17657 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
17658 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
17659 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
17660 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
17661 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
17662 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
17663 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
17664 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
17665 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
17666 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
17667 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
17668 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
17669
17670 /* Monadic operations. */
17671 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
17672 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
17673 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
17674
17675 /* Dyadic operations. */
17676 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17677 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17678 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17679 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17680 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17681 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17682 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17683 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17684 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17685
17686 /* Comparisons. */
17687 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
17688 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
17689 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
17690 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
17691
17692 /* Double precision load/store are still present on single precision
17693 implementations. */
17694 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17695 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17696 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
17697 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
17698 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
17699 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
17700 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
17701 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
17702 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
17703 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
17704
17705 #undef ARM_VARIANT
17706 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17707
17708 /* Moves and type conversions. */
17709 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17710 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17711 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17712 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
17713 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
17714 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
17715 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
17716 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17717 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
17718 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17719 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17720 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17721 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17722
17723 /* Monadic operations. */
17724 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17725 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17726 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17727
17728 /* Dyadic operations. */
17729 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17730 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17731 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17732 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17733 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17734 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17735 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17736 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17737 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17738
17739 /* Comparisons. */
17740 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17741 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
17742 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17743 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
17744
17745 #undef ARM_VARIANT
17746 #define ARM_VARIANT & fpu_vfp_ext_v2
17747
17748 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
17749 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
17750 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
17751 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
17752
17753 /* Instructions which may belong to either the Neon or VFP instruction sets.
17754 Individual encoder functions perform additional architecture checks. */
17755 #undef ARM_VARIANT
17756 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17757 #undef THUMB_VARIANT
17758 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17759
17760 /* These mnemonics are unique to VFP. */
17761 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
17762 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
17763 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17764 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17765 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17766 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17767 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17768 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
17769 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
17770 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
17771
17772 /* Mnemonics shared by Neon and VFP. */
17773 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
17774 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17775 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17776
17777 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17778 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17779
17780 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17781 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17782
17783 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17784 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17785 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17786 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17787 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17788 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
17789 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17790 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17791
17792 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
17793 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
17794 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
17795 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
17796
17797
17798 /* NOTE: All VMOV encoding is special-cased! */
17799 NCE(vmov, 0, 1, (VMOV), neon_mov),
17800 NCE(vmovq, 0, 1, (VMOV), neon_mov),
17801
17802 #undef THUMB_VARIANT
17803 #define THUMB_VARIANT & fpu_neon_ext_v1
17804 #undef ARM_VARIANT
17805 #define ARM_VARIANT & fpu_neon_ext_v1
17806
17807 /* Data processing with three registers of the same length. */
17808 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17809 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
17810 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
17811 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17812 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17813 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17814 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17815 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17816 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17817 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17818 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17819 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17820 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17821 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17822 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17823 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17824 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17825 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17826 /* If not immediate, fall back to neon_dyadic_i64_su.
17827 shl_imm should accept I8 I16 I32 I64,
17828 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17829 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
17830 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
17831 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
17832 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
17833 /* Logic ops, types optional & ignored. */
17834 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17835 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17836 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17837 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17838 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17839 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17840 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17841 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17842 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
17843 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
17844 /* Bitfield ops, untyped. */
17845 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17846 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17847 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17848 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17849 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17850 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17851 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17852 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17853 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17854 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17855 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17856 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17857 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17858 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17859 back to neon_dyadic_if_su. */
17860 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17861 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17862 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17863 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17864 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17865 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17866 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17867 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17868 /* Comparison. Type I8 I16 I32 F32. */
17869 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
17870 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
17871 /* As above, D registers only. */
17872 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17873 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17874 /* Int and float variants, signedness unimportant. */
17875 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17876 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17877 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
17878 /* Add/sub take types I8 I16 I32 I64 F32. */
17879 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17880 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17881 /* vtst takes sizes 8, 16, 32. */
17882 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
17883 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
17884 /* VMUL takes I8 I16 I32 F32 P8. */
17885 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
17886 /* VQD{R}MULH takes S16 S32. */
17887 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17888 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17889 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17890 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17891 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17892 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17893 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17894 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17895 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17896 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17897 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17898 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17899 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17900 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17901 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17902 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17903
17904 /* Two address, int/float. Types S8 S16 S32 F32. */
17905 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
17906 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
17907
17908 /* Data processing with two registers and a shift amount. */
17909 /* Right shifts, and variants with rounding.
17910 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17911 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17912 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17913 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17914 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17915 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17916 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17917 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17918 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17919 /* Shift and insert. Sizes accepted 8 16 32 64. */
17920 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
17921 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
17922 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
17923 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
17924 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17925 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
17926 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
17927 /* Right shift immediate, saturating & narrowing, with rounding variants.
17928 Types accepted S16 S32 S64 U16 U32 U64. */
17929 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17930 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17931 /* As above, unsigned. Types accepted S16 S32 S64. */
17932 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17933 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17934 /* Right shift narrowing. Types accepted I16 I32 I64. */
17935 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17936 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17937 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17938 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
17939 /* CVT with optional immediate for fixed-point variant. */
17940 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
17941
17942 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
17943 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
17944
17945 /* Data processing, three registers of different lengths. */
17946 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17947 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
17948 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
17949 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
17950 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
17951 /* If not scalar, fall back to neon_dyadic_long.
17952 Vector types as above, scalar types S16 S32 U16 U32. */
17953 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17954 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17955 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17956 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17957 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17958 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17959 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17960 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17961 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17962 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17963 /* Saturating doubling multiplies. Types S16 S32. */
17964 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17965 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17966 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17967 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17968 S16 S32 U16 U32. */
17969 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
17970
17971 /* Extract. Size 8. */
17972 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
17973 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
17974
17975 /* Two registers, miscellaneous. */
17976 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17977 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
17978 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
17979 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
17980 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
17981 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
17982 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
17983 /* Vector replicate. Sizes 8 16 32. */
17984 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
17985 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
17986 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17987 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
17988 /* VMOVN. Types I16 I32 I64. */
17989 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
17990 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17991 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
17992 /* VQMOVUN. Types S16 S32 S64. */
17993 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
17994 /* VZIP / VUZP. Sizes 8 16 32. */
17995 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
17996 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
17997 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
17998 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
17999 /* VQABS / VQNEG. Types S8 S16 S32. */
18000 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18001 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
18002 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18003 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
18004 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
18005 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
18006 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
18007 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
18008 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
18009 /* Reciprocal estimates. Types U32 F32. */
18010 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
18011 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
18012 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
18013 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
18014 /* VCLS. Types S8 S16 S32. */
18015 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
18016 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
18017 /* VCLZ. Types I8 I16 I32. */
18018 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
18019 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
18020 /* VCNT. Size 8. */
18021 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
18022 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
18023 /* Two address, untyped. */
18024 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
18025 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
18026 /* VTRN. Sizes 8 16 32. */
18027 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
18028 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
18029
18030 /* Table lookup. Size 8. */
18031 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18032 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18033
18034 #undef THUMB_VARIANT
18035 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
18036 #undef ARM_VARIANT
18037 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
18038
18039 /* Neon element/structure load/store. */
18040 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18041 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18042 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18043 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18044 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18045 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18046 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18047 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18048
18049 #undef THUMB_VARIANT
18050 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
18051 #undef ARM_VARIANT
18052 #define ARM_VARIANT &fpu_vfp_ext_v3xd
18053 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
18054 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18055 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18056 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18057 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18058 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18059 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18060 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18061 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18062
18063 #undef THUMB_VARIANT
18064 #define THUMB_VARIANT & fpu_vfp_ext_v3
18065 #undef ARM_VARIANT
18066 #define ARM_VARIANT & fpu_vfp_ext_v3
18067
18068 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
18069 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18070 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18071 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18072 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18073 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18074 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18075 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18076 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18077
18078 #undef ARM_VARIANT
18079 #define ARM_VARIANT &fpu_vfp_ext_fma
18080 #undef THUMB_VARIANT
18081 #define THUMB_VARIANT &fpu_vfp_ext_fma
18082 /* Mnemonics shared by Neon and VFP. These are included in the
18083 VFP FMA variant; NEON and VFP FMA always includes the NEON
18084 FMA instructions. */
18085 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18086 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18087 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18088 the v form should always be used. */
18089 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18090 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18091 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18092 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18093 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18094 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18095
18096 #undef THUMB_VARIANT
18097 #undef ARM_VARIANT
18098 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
18099
18100 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18101 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18102 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18103 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18104 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18105 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18106 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18107 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18108
18109 #undef ARM_VARIANT
18110 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
18111
18112 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
18113 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
18114 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
18115 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
18116 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
18117 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
18118 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
18119 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
18120 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
18121 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18122 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18123 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18124 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18125 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18126 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18127 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18128 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18129 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18130 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
18131 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
18132 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18133 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18134 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18135 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18136 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18137 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18138 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
18139 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
18140 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
18141 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
18142 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
18143 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
18144 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
18145 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
18146 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
18147 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
18148 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
18149 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18150 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18151 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18152 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18153 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18154 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18155 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18156 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18157 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18158 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18159 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18160 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18161 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18162 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18163 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18164 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18165 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18166 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18167 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18168 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18169 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18170 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18171 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18172 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18173 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18174 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18175 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18176 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18177 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18178 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18179 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18180 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18181 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18182 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18183 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18184 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18185 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18186 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18187 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18188 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18189 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18190 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18191 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18192 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18193 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18194 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18195 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18196 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18197 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18198 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18199 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18200 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
18201 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18202 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18203 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18204 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18205 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18206 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18207 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18208 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18209 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18210 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18211 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18212 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18213 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18214 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18215 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18216 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18217 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18218 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18219 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18220 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18221 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18222 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
18223 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18224 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18225 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18226 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18227 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18228 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18229 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18230 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18231 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18232 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18233 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18234 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18235 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18236 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18237 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18238 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18239 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18240 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18241 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18242 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18243 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18244 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18245 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18246 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18247 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18248 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18249 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18250 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18251 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18252 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18253 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18254 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
18255 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
18256 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
18257 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
18258 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
18259 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
18260 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18261 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18262 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18263 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
18264 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
18265 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
18266 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
18267 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
18268 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
18269 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18270 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18271 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18272 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18273 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
18274
18275 #undef ARM_VARIANT
18276 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18277
18278 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
18279 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
18280 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
18281 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
18282 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
18283 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
18284 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18285 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18286 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18287 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18288 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18289 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18290 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18291 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18292 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18293 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18294 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18295 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18296 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18297 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18298 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18299 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18300 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18301 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18302 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18303 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18304 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18305 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18306 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18307 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18308 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18309 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18310 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18311 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18312 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18313 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18314 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18315 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18316 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18317 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18318 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18319 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18320 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18321 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18322 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18323 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18324 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18325 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18326 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18327 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18328 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18329 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18330 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18331 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18332 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18333 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18334 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18335
18336 #undef ARM_VARIANT
18337 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18338
18339 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18340 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18341 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18342 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18343 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18344 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18345 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18346 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18347 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
18348 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
18349 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
18350 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
18351 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
18352 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
18353 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
18354 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
18355 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
18356 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
18357 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
18358 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
18359 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
18360 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
18361 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
18362 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
18363 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
18364 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
18365 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
18366 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
18367 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
18368 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
18369 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
18370 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
18371 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
18372 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
18373 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
18374 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
18375 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
18376 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
18377 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
18378 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
18379 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
18380 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
18381 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
18382 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
18383 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
18384 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
18385 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
18386 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
18387 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
18388 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
18389 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
18390 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
18391 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
18392 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
18393 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
18394 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
18395 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
18396 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
18397 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
18398 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
18399 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
18400 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
18401 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
18402 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
18403 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18404 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18405 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18406 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18407 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18408 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18409 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18410 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18411 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18412 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18413 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18414 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18415 };
18416 #undef ARM_VARIANT
18417 #undef THUMB_VARIANT
18418 #undef TCE
18419 #undef TCM
18420 #undef TUE
18421 #undef TUF
18422 #undef TCC
18423 #undef cCE
18424 #undef cCL
18425 #undef C3E
18426 #undef CE
18427 #undef CM
18428 #undef UE
18429 #undef UF
18430 #undef UT
18431 #undef NUF
18432 #undef nUF
18433 #undef NCE
18434 #undef nCE
18435 #undef OPS0
18436 #undef OPS1
18437 #undef OPS2
18438 #undef OPS3
18439 #undef OPS4
18440 #undef OPS5
18441 #undef OPS6
18442 #undef do_0
18443 \f
18444 /* MD interface: bits in the object file. */
18445
18446 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18447 for use in the a.out file, and stores them in the array pointed to by buf.
18448 This knows about the endian-ness of the target machine and does
18449 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18450 2 (short) and 4 (long) Floating numbers are put out as a series of
18451 LITTLENUMS (shorts, here at least). */
18452
18453 void
18454 md_number_to_chars (char * buf, valueT val, int n)
18455 {
18456 if (target_big_endian)
18457 number_to_chars_bigendian (buf, val, n);
18458 else
18459 number_to_chars_littleendian (buf, val, n);
18460 }
18461
18462 static valueT
18463 md_chars_to_number (char * buf, int n)
18464 {
18465 valueT result = 0;
18466 unsigned char * where = (unsigned char *) buf;
18467
18468 if (target_big_endian)
18469 {
18470 while (n--)
18471 {
18472 result <<= 8;
18473 result |= (*where++ & 255);
18474 }
18475 }
18476 else
18477 {
18478 while (n--)
18479 {
18480 result <<= 8;
18481 result |= (where[n] & 255);
18482 }
18483 }
18484
18485 return result;
18486 }
18487
18488 /* MD interface: Sections. */
18489
18490 /* Estimate the size of a frag before relaxing. Assume everything fits in
18491 2 bytes. */
18492
18493 int
18494 md_estimate_size_before_relax (fragS * fragp,
18495 segT segtype ATTRIBUTE_UNUSED)
18496 {
18497 fragp->fr_var = 2;
18498 return 2;
18499 }
18500
/* Convert a machine dependent frag.  Called once relaxation has settled
   on a final size for FRAGP (fr_var == 2 for the narrow Thumb encoding,
   4 for the wide Thumb-2 encoding): rewrite the placeholder opcode in
   the frag if it was widened, and attach the fixup that will fill in
   the immediate or branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;	/* The narrow (16-bit) opcode originally emitted.  */
  char *buf;
  expressionS exp;	/* Expression the new fixup resolves against.  */
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction placeholder sits at the end of the fixed part of
     the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    /* Loads and stores with a relaxable offset.  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Widen to the 32-bit encoding, copying the register fields
	     from the narrow opcode into their Thumb-2 positions.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* PC- or SP-relative narrow form: Rt lives in bits 8-10.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Immediate-offset narrow form: Rt in bits 0-2,
		 Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative load needs a PC-relative fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy the destination register field across.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC; fold the
	     4-byte pipeline offset into the addend.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    /* Data processing with a relaxable 8-bit immediate.  */
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS write Rd (no shift needed); CMP/CMN encode the
	     register in the Rn position, 8 bits further up.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      /* Conditional branch: the condition code moves from bits 8-11
	 of the narrow encoding to bits 22-25 of the wide one.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    /* Three-register add/sub immediate forms.  */
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd and Rn from the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit) selects which immediate reloc applies.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* Relaxation only ever creates the subtypes handled above.  */
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed: account for it.  */
  fragp->fr_fix += fragp->fr_var;
}
18669
18670 /* Return the size of a relaxable immediate operand instruction.
18671 SHIFT and SIZE specify the form of the allowable immediate. */
18672 static int
18673 relax_immediate (fragS *fragp, int size, int shift)
18674 {
18675 offsetT offset;
18676 offsetT mask;
18677 offsetT low;
18678
18679 /* ??? Should be able to do better than this. */
18680 if (fragp->fr_symbol)
18681 return 4;
18682
18683 low = (1 << shift) - 1;
18684 mask = (1 << (shift + size)) - (1 << shift);
18685 offset = fragp->fr_offset;
18686 /* Force misaligned offsets to 32-bit variant. */
18687 if (offset & low)
18688 return 4;
18689 if (offset & ~mask)
18690 return 4;
18691 return 2;
18692 }
18693
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   being relaxed and STRETCH the cumulative size change so far on this
   relaxation pass; the result anticipates how the symbol's frag will
   move if it has not been processed yet this pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An alignment frag absorbs movement up to its
		 alignment boundary: round the stretch towards zero
		 to a multiple of 2^fr_offset.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once fully absorbed, frags beyond this point do not
		 move at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means the symbol's frag precedes this one, so it has
	 already been placed and needs no adjustment.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
18743
18744 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18745 load. */
18746 static int
18747 relax_adr (fragS *fragp, asection *sec, long stretch)
18748 {
18749 addressT addr;
18750 offsetT val;
18751
18752 /* Assume worst case for symbols not known to be in the same section. */
18753 if (fragp->fr_symbol == NULL
18754 || !S_IS_DEFINED (fragp->fr_symbol)
18755 || sec != S_GET_SEGMENT (fragp->fr_symbol)
18756 || S_IS_WEAK (fragp->fr_symbol))
18757 return 4;
18758
18759 val = relaxed_symbol_addr (fragp, stretch);
18760 addr = fragp->fr_address + fragp->fr_fix;
18761 addr = (addr + 4) & ~3;
18762 /* Force misaligned targets to 32-bit variant. */
18763 if (val & 3)
18764 return 4;
18765 val -= addr;
18766 if (val < 0 || val > 1020)
18767 return 4;
18768 return 2;
18769 }
18770
18771 /* Return the size of a relaxable add/sub immediate instruction. */
18772 static int
18773 relax_addsub (fragS *fragp, asection *sec)
18774 {
18775 char *buf;
18776 int op;
18777
18778 buf = fragp->fr_literal + fragp->fr_fix;
18779 op = bfd_get_16(sec->owner, buf);
18780 if ((op & 0xf) == ((op >> 4) & 0xf))
18781 return relax_immediate (fragp, 8, 0);
18782 else
18783 return relax_immediate (fragp, 3, 0);
18784 }
18785
18786
18787 /* Return the size of a relaxable branch instruction. BITS is the
18788 size of the offset field in the narrow instruction. */
18789
18790 static int
18791 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
18792 {
18793 addressT addr;
18794 offsetT val;
18795 offsetT limit;
18796
18797 /* Assume worst case for symbols not known to be in the same section. */
18798 if (!S_IS_DEFINED (fragp->fr_symbol)
18799 || sec != S_GET_SEGMENT (fragp->fr_symbol)
18800 || S_IS_WEAK (fragp->fr_symbol))
18801 return 4;
18802
18803 #ifdef OBJ_ELF
18804 if (S_IS_DEFINED (fragp->fr_symbol)
18805 && ARM_IS_FUNC (fragp->fr_symbol))
18806 return 4;
18807 #endif
18808
18809 val = relaxed_symbol_addr (fragp, stretch);
18810 addr = fragp->fr_address + fragp->fr_fix + 4;
18811 val -= addr;
18812
18813 /* Offset is a signed value *2 */
18814 limit = 1 << bits;
18815 if (val >= limit || val < -limit)
18816 return 4;
18817 return 2;
18818 }
18819
18820
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   mnemonic recorded in fr_subtype to the matching range check, then
   freezes any frag that has settled at the wide encoding.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte-scaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit unscaled immediate.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit halfword offset.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit halfword offset.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      /* Only the subtypes above are ever created for relaxation.  */
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
18899
18900 /* Round up a section size to the appropriate boundary. */
18901
18902 valueT
18903 md_section_align (segT segment ATTRIBUTE_UNUSED,
18904 valueT size)
18905 {
18906 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18907 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
18908 {
18909 /* For a.out, force the section size to be aligned. If we don't do
18910 this, BFD will align it for us, but it will not write out the
18911 final bytes of the section. This may be a bug in BFD, but it is
18912 easier to fix it here since that is how the other a.out targets
18913 work. */
18914 int align;
18915
18916 align = bfd_get_section_alignment (stdoutput, segment);
18917 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
18918 }
18919 #endif
18920
18921 return size;
18922 }
18923
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad with zero bytes up to the nop
   alignment, then fill the rest with nop instructions appropriate for
   the recorded ARM/Thumb mode and the selected CPU.  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM-state nops, indexed by [has_v6k][big_endian].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1: "mov r0, r0".  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k: architected "nop".  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Narrow Thumb nops, indexed by [is_thumb2][big_endian].  */
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1: "mov r8, r8".  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2: architected narrow "nop".  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit "nop.w", indexed by [big_endian].  */
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* MAX_MEM_FOR_RS_ALIGN_CODE is 2^n - 1, so this masks the count down
     to the space actually reserved in the frag.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the instruction mode.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb padding.  On Thumb-2 prefer wide nops, keeping one
	 narrow nop in reserve for odd halfword counts.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM padding.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any leading misalignment with zero bytes (data), marked with a
     data mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest with full-size nops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
19040
19041 /* Called from md_do_align. Used to create an alignment
19042 frag in a code section. */
19043
19044 void
19045 arm_frag_align_code (int n, int max)
19046 {
19047 char * p;
19048
19049 /* We assume that there will never be a requirement
19050 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
19051 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19052 {
19053 char err_msg[128];
19054
19055 sprintf (err_msg,
19056 _("alignments greater than %d bytes not supported in .text sections."),
19057 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
19058 as_fatal ("%s", err_msg);
19059 }
19060
19061 p = frag_var (rs_align_code,
19062 MAX_MEM_FOR_RS_ALIGN_CODE,
19063 1,
19064 (relax_substateT) max,
19065 (symbolS *) NULL,
19066 (offsetT) n,
19067 (char *) NULL);
19068 *p = 0;
19069 }
19070
19071 /* Perform target specific initialisation of a frag.
19072 Note - despite the name this initialisation is not done when the frag
19073 is created, but only when its type is assigned. A frag can be created
19074 and used a long time before its type is set, so beware of assuming that
19075 this initialisationis performed first. */
19076
19077 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  The
     MODE_RECORDED flag marks the value as valid, which arm_handle_align
     later asserts on.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
19084
19085 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    {
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
	 later if the alignment ends up empty.  */
      switch (fragP->fr_type)
	{
	case rs_align:
	case rs_align_test:
	case rs_fill:
	  /* Alignment padding counts as data.  */
	  mapping_state_2 (MAP_DATA, max_chars);
	  break;
	case rs_align_code:
	  /* Code alignment is padded with nops in the current mode.  */
	  mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
	  break;
	default:
	  break;
	}
    }
}
19112
19113 /* When we change sections we need to issue a new mapping symbol. */
19114
19115 void
19116 arm_elf_change_section (void)
19117 {
19118 /* Link an unlinked unwind index table section to the .text section. */
19119 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19120 && elf_linked_to_section (now_seg) == NULL)
19121 elf_linked_to_section (now_seg) = text_section;
19122 }
19123
19124 int
19125 arm_elf_section_type (const char * str, size_t len)
19126 {
19127 if (len == 5 && strncmp (str, "exidx", 5) == 0)
19128 return SHT_ARM_EXIDX;
19129
19130 return -1;
19131 }
19132 \f
19133 /* Code to deal with unwinding tables. */
19134
19135 static void add_unwind_adjustsp (offsetT);
19136
19137 /* Generate any deferred unwind frame offset. */
19138
19139 static void
19140 flush_pending_unwind (void)
19141 {
19142 offsetT offset;
19143
19144 offset = unwind.pending_offset;
19145 unwind.pending_offset = 0;
19146 if (offset != 0)
19147 add_unwind_adjustsp (offset);
19148 }
19149
19150 /* Add an opcode to this list for this function. Two-byte opcodes should
19151 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
19152 order. */
19153
19154 static void
19155 add_unwind_opcode (valueT op, int length)
19156 {
19157 /* Add any deferred stack adjustment. */
19158 if (unwind.pending_offset)
19159 flush_pending_unwind ();
19160
19161 unwind.sp_restored = 0;
19162
19163 if (unwind.opcode_count + length > unwind.opcode_alloc)
19164 {
19165 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19166 if (unwind.opcodes)
19167 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19168 unwind.opcode_alloc);
19169 else
19170 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
19171 }
19172 while (length > 0)
19173 {
19174 length--;
19175 unwind.opcodes[unwind.opcode_count] = op & 0xff;
19176 op >>= 8;
19177 unwind.opcode_count++;
19178 }
19179 }
19180
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive means the frame grew).  Chooses the shortest ARM EHABI
   encoding that covers the adjustment.  Remember the opcode list is
   built in reverse order, so multi-byte sequences are appended
   back-to-front.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero uleb128 still needs one explicit zero byte.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    /* Continuation bit on all but the last group.  */
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go in last-first so the reversed list
	 reads 0xb2 followed by the uleb128 in order.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  0x3f is the maximal "vsp += constant"
	 short form; the second opcode covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f encodes "vsp += (op << 2) + 4".  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: 0x40-0x7f encodes "vsp -= ...", maximal
	 steps of 0x100 first.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
19242
19243 /* Finish the list of unwind opcodes for this function. */
19244 static void
19245 finish_unwind_opcodes (void)
19246 {
19247 valueT op;
19248
19249 if (unwind.fp_used)
19250 {
19251 /* Adjust sp as necessary. */
19252 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
19253 flush_pending_unwind ();
19254
19255 /* After restoring sp from the frame pointer. */
19256 op = 0x90 | unwind.fp_reg;
19257 add_unwind_opcode (op, 1);
19258 }
19259 else
19260 flush_pending_unwind ();
19261 }
19262
19263
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry (.ARM.exidx*), otherwise an unwind info entry (.ARM.extab*).
   Switches to (creating if necessary) the unwind section matching
   TEXT_SEG, propagating any COMDAT grouping from the code section.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the code section name;
     plain ".text" contributes no suffix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once code gets the link-once unwind prefix, with the
	 ".gnu.linkonce.t." part stripped from the suffix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  The unwind section joins the same group as
     its code section so both are kept or discarded together.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
19339
19340
19341 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
19342 personality routine data. Returns zero, or the index table value for
19343 and inline entry. */
19344
static valueT
create_unwind_entry (int have_data)
{
  /* Number of additional 32-bit data words in the table entry.  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a .cantunwind frame.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Routine 0 carries at most three opcode bytes.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack the opcodes MSB-first; they were emitted in reverse
		 order, so walking opcode_count downwards reverses them.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  /* Round the byte count up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  /* The word count must fit in a single byte of the entry.  */
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Align to a 4-byte (2**2) boundary.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
19498
19499
19500 /* Initialize the DWARF-2 unwind information for this procedure. */
19501
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
19507 #endif /* OBJ_ELF */
19508
19509 /* Convert REGNAME to a DWARF-2 register number. */
19510
19511 int
19512 tc_arm_regname_to_dw2regnum (char *regname)
19513 {
19514 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
19515
19516 if (reg == FAIL)
19517 return -1;
19518
19519 return reg;
19520 }
19521
19522 #ifdef TE_PE
19523 void
19524 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
19525 {
19526 expressionS exp;
19527
19528 exp.X_op = O_secrel;
19529 exp.X_add_symbol = symbol;
19530 exp.X_add_number = 0;
19531 emit_expr (&exp, size);
19532 }
19533 #endif
19534
19535 /* MD interface: Symbol and relocation handling. */
19536
19537 /* Return the address within the segment that a PC-relative fixup is
19538 relative to. For ARM, PC-relative fixups applied to instructions
19539 are generally relative to the location of the fixup plus 8 bytes.
19540 Thumb branches are offset by 4, and Thumb loads relative to PC
19541 require special handling. */
19542
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Raw address of the fixup within the output section.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A branch to a local ARM function on a v5T+ core will be
	 resolved at assembly time, so undo the zeroing above and use
	 the real fixup address as the base.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* Same local-resolution case as BRANCH23, but for a Thumb target.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* A bl to a local Thumb function gets converted to blx in
	 md_apply_fix, so it too is resolved against the real base.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
19664
19665 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19666 Otherwise we have no need to default values of symbols. */
19667
19668 symbolS *
19669 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
19670 {
19671 #ifdef OBJ_ELF
19672 if (name[0] == '_' && name[1] == 'G'
19673 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
19674 {
19675 if (!GOT_symbol)
19676 {
19677 if (symbol_find (name))
19678 as_bad (_("GOT already in the symbol table"));
19679
19680 GOT_symbol = symbol_new (name, undefined_section,
19681 (valueT) 0, & zero_address_frag);
19682 }
19683
19684 return GOT_symbol;
19685 }
19686 #endif
19687
19688 return NULL;
19689 }
19690
19691 /* Subroutine of md_apply_fix. Check to see if an immediate can be
19692 computed as two separate immediate values, added together. We
19693 already know that this value cannot be computed by just one ARM
19694 instruction. */
19695
19696 static unsigned int
19697 validate_immediate_twopart (unsigned int val,
19698 unsigned int * highpart)
19699 {
19700 unsigned int a;
19701 unsigned int i;
19702
19703 for (i = 0; i < 32; i += 2)
19704 if (((a = rotate_left (val, i)) & 0xff) != 0)
19705 {
19706 if (a & 0xff00)
19707 {
19708 if (a & ~ 0xffff)
19709 continue;
19710 * highpart = (a >> 8) | ((i + 24) << 7);
19711 }
19712 else if (a & 0xff0000)
19713 {
19714 if (a & 0xff000000)
19715 continue;
19716 * highpart = (a >> 16) | ((i + 16) << 7);
19717 }
19718 else
19719 {
19720 gas_assert (a & 0xff000000);
19721 * highpart = (a >> 24) | ((i + 8) << 7);
19722 }
19723
19724 return (a & 0xff) | (i << 7);
19725 }
19726
19727 return FAIL;
19728 }
19729
19730 static int
19731 validate_offset_imm (unsigned int val, int hwse)
19732 {
19733 if ((hwse && val > 255) || val > 4095)
19734 return FAIL;
19735 return val;
19736 }
19737
19738 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19739 negative immediate constant by altering the instruction. A bit of
19740 a hack really.
19741 MOV <-> MVN
19742 AND <-> BIC
19743 ADC <-> SBC
19744 by inverting the second operand, and
19745 ADD <-> SUB
19746 CMP <-> CMN
19747 by negating the second operand. */
19748
19749 static int
19750 negate_data_op (unsigned long * instruction,
19751 unsigned long value)
19752 {
19753 int op, new_inst;
19754 unsigned long negated, inverted;
19755
19756 negated = encode_arm_immediate (-value);
19757 inverted = encode_arm_immediate (~value);
19758
19759 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
19760 switch (op)
19761 {
19762 /* First negates. */
19763 case OPCODE_SUB: /* ADD <-> SUB */
19764 new_inst = OPCODE_ADD;
19765 value = negated;
19766 break;
19767
19768 case OPCODE_ADD:
19769 new_inst = OPCODE_SUB;
19770 value = negated;
19771 break;
19772
19773 case OPCODE_CMP: /* CMP <-> CMN */
19774 new_inst = OPCODE_CMN;
19775 value = negated;
19776 break;
19777
19778 case OPCODE_CMN:
19779 new_inst = OPCODE_CMP;
19780 value = negated;
19781 break;
19782
19783 /* Now Inverted ops. */
19784 case OPCODE_MOV: /* MOV <-> MVN */
19785 new_inst = OPCODE_MVN;
19786 value = inverted;
19787 break;
19788
19789 case OPCODE_MVN:
19790 new_inst = OPCODE_MOV;
19791 value = inverted;
19792 break;
19793
19794 case OPCODE_AND: /* AND <-> BIC */
19795 new_inst = OPCODE_BIC;
19796 value = inverted;
19797 break;
19798
19799 case OPCODE_BIC:
19800 new_inst = OPCODE_AND;
19801 value = inverted;
19802 break;
19803
19804 case OPCODE_ADC: /* ADC <-> SBC */
19805 new_inst = OPCODE_SBC;
19806 value = inverted;
19807 break;
19808
19809 case OPCODE_SBC:
19810 new_inst = OPCODE_ADC;
19811 value = inverted;
19812 break;
19813
19814 /* We cannot do anything. */
19815 default:
19816 return FAIL;
19817 }
19818
19819 if (value == (unsigned) FAIL)
19820 return FAIL;
19821
19822 *instruction &= OPCODE_MASK;
19823 *instruction |= new_inst << DATA_OP_SHIFT;
19824 return value;
19825 }
19826
19827 /* Like negate_data_op, but for Thumb-2. */
19828
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate replacement immediates; either may be FAIL if the
     constant is not encodable as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 here encodes TST, which cannot be rewritten.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Substitute the new opcode, preserving all other fields.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
19902
19903 /* Read a 32-bit thumb instruction from buf. */
19904 static unsigned long
19905 get_thumb32_insn (char * buf)
19906 {
19907 unsigned long insn;
19908 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
19909 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19910
19911 return insn;
19912 }
19913
19914
19915 /* We usually want to set the low bit on the address of thumb function
19916 symbols. In particular .word foo - . should have the low bit set.
19917 Generic code tries to fold the difference of two symbols to
19918 a constant. Prevent this and force a relocation when the first symbols
19919 is a thumb function. */
19920
19921 bfd_boolean
19922 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
19923 {
19924 if (op == O_subtract
19925 && l->X_op == O_symbol
19926 && r->X_op == O_symbol
19927 && THUMB_IS_FUNC (l->X_add_symbol))
19928 {
19929 l->X_op = O_subtract;
19930 l->X_op_symbol = r->X_add_symbol;
19931 l->X_add_number -= r->X_add_number;
19932 return TRUE;
19933 }
19934
19935 /* Process as normal. */
19936 return FALSE;
19937 }
19938
19939 /* Encode Thumb2 unconditional branches and calls. The encoding
19940 for the 2 are identical for the immediate values. */
19941
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Positions of the J1 (bit 13) and J2 (bit 11) fields in the second
     halfword.  */
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the fields of the encoding: S is the
     sign bit, I1/I2 the next two bits, then a 10-bit high part and an
     11-bit low part (bit 0 of the offset is always discarded).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = ~(I1 ^ S) and J2 = ~(I2 ^ S); the
     final XOR with T2I1I2MASK performs that inversion.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
19963
19964 void
19965 md_apply_fix (fixS * fixP,
19966 valueT * valP,
19967 segT seg)
19968 {
19969 offsetT value = * valP;
19970 offsetT newval;
19971 unsigned int newimm;
19972 unsigned long temp;
19973 int sign;
19974 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
19975
19976 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
19977
19978 /* Note whether this will delete the relocation. */
19979
19980 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
19981 fixP->fx_done = 1;
19982
19983 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19984 consistency with the behaviour on 32-bit hosts. Remember value
19985 for emit_reloc. */
19986 value &= 0xffffffff;
19987 value ^= 0x80000000;
19988 value -= 0x80000000;
19989
19990 *valP = value;
19991 fixP->fx_addnumber = value;
19992
19993 /* Same treatment for fixP->fx_offset. */
19994 fixP->fx_offset &= 0xffffffff;
19995 fixP->fx_offset ^= 0x80000000;
19996 fixP->fx_offset -= 0x80000000;
19997
19998 switch (fixP->fx_r_type)
19999 {
20000 case BFD_RELOC_NONE:
20001 /* This will need to go in the object file. */
20002 fixP->fx_done = 0;
20003 break;
20004
20005 case BFD_RELOC_ARM_IMMEDIATE:
20006 /* We claim that this fixup has been processed here,
20007 even if in fact we generate an error because we do
20008 not have a reloc for it, so tc_gen_reloc will reject it. */
20009 fixP->fx_done = 1;
20010
20011 if (fixP->fx_addsy)
20012 {
20013 const char *msg = 0;
20014
20015 if (! S_IS_DEFINED (fixP->fx_addsy))
20016 msg = _("undefined symbol %s used as an immediate value");
20017 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20018 msg = _("symbol %s is in a different section");
20019 else if (S_IS_WEAK (fixP->fx_addsy))
20020 msg = _("symbol %s is weak and may be overridden later");
20021
20022 if (msg)
20023 {
20024 as_bad_where (fixP->fx_file, fixP->fx_line,
20025 msg, S_GET_NAME (fixP->fx_addsy));
20026 break;
20027 }
20028 }
20029
20030 newimm = encode_arm_immediate (value);
20031 temp = md_chars_to_number (buf, INSN_SIZE);
20032
20033 /* If the instruction will fail, see if we can fix things up by
20034 changing the opcode. */
20035 if (newimm == (unsigned int) FAIL
20036 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
20037 {
20038 as_bad_where (fixP->fx_file, fixP->fx_line,
20039 _("invalid constant (%lx) after fixup"),
20040 (unsigned long) value);
20041 break;
20042 }
20043
20044 newimm |= (temp & 0xfffff000);
20045 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20046 break;
20047
20048 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20049 {
20050 unsigned int highpart = 0;
20051 unsigned int newinsn = 0xe1a00000; /* nop. */
20052
20053 if (fixP->fx_addsy)
20054 {
20055 const char *msg = 0;
20056
20057 if (! S_IS_DEFINED (fixP->fx_addsy))
20058 msg = _("undefined symbol %s used as an immediate value");
20059 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20060 msg = _("symbol %s is in a different section");
20061 else if (S_IS_WEAK (fixP->fx_addsy))
20062 msg = _("symbol %s is weak and may be overridden later");
20063
20064 if (msg)
20065 {
20066 as_bad_where (fixP->fx_file, fixP->fx_line,
20067 msg, S_GET_NAME (fixP->fx_addsy));
20068 break;
20069 }
20070 }
20071
20072 newimm = encode_arm_immediate (value);
20073 temp = md_chars_to_number (buf, INSN_SIZE);
20074
20075 /* If the instruction will fail, see if we can fix things up by
20076 changing the opcode. */
20077 if (newimm == (unsigned int) FAIL
20078 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20079 {
20080 /* No ? OK - try using two ADD instructions to generate
20081 the value. */
20082 newimm = validate_immediate_twopart (value, & highpart);
20083
20084 /* Yes - then make sure that the second instruction is
20085 also an add. */
20086 if (newimm != (unsigned int) FAIL)
20087 newinsn = temp;
20088 /* Still No ? Try using a negated value. */
20089 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20090 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20091 /* Otherwise - give up. */
20092 else
20093 {
20094 as_bad_where (fixP->fx_file, fixP->fx_line,
20095 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20096 (long) value);
20097 break;
20098 }
20099
20100 /* Replace the first operand in the 2nd instruction (which
20101 is the PC) with the destination register. We have
20102 already added in the PC in the first instruction and we
20103 do not want to do it again. */
20104 newinsn &= ~ 0xf0000;
20105 newinsn |= ((newinsn & 0x0f000) << 4);
20106 }
20107
20108 newimm |= (temp & 0xfffff000);
20109 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20110
20111 highpart |= (newinsn & 0xfffff000);
20112 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20113 }
20114 break;
20115
20116 case BFD_RELOC_ARM_OFFSET_IMM:
20117 if (!fixP->fx_done && seg->use_rela_p)
20118 value = 0;
20119
20120 case BFD_RELOC_ARM_LITERAL:
20121 sign = value >= 0;
20122
20123 if (value < 0)
20124 value = - value;
20125
20126 if (validate_offset_imm (value, 0) == FAIL)
20127 {
20128 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20129 as_bad_where (fixP->fx_file, fixP->fx_line,
20130 _("invalid literal constant: pool needs to be closer"));
20131 else
20132 as_bad_where (fixP->fx_file, fixP->fx_line,
20133 _("bad immediate value for offset (%ld)"),
20134 (long) value);
20135 break;
20136 }
20137
20138 newval = md_chars_to_number (buf, INSN_SIZE);
20139 newval &= 0xff7ff000;
20140 newval |= value | (sign ? INDEX_UP : 0);
20141 md_number_to_chars (buf, newval, INSN_SIZE);
20142 break;
20143
20144 case BFD_RELOC_ARM_OFFSET_IMM8:
20145 case BFD_RELOC_ARM_HWLITERAL:
20146 sign = value >= 0;
20147
20148 if (value < 0)
20149 value = - value;
20150
20151 if (validate_offset_imm (value, 1) == FAIL)
20152 {
20153 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20154 as_bad_where (fixP->fx_file, fixP->fx_line,
20155 _("invalid literal constant: pool needs to be closer"));
20156 else
20157 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20158 (long) value);
20159 break;
20160 }
20161
20162 newval = md_chars_to_number (buf, INSN_SIZE);
20163 newval &= 0xff7ff0f0;
20164 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20165 md_number_to_chars (buf, newval, INSN_SIZE);
20166 break;
20167
20168 case BFD_RELOC_ARM_T32_OFFSET_U8:
20169 if (value < 0 || value > 1020 || value % 4 != 0)
20170 as_bad_where (fixP->fx_file, fixP->fx_line,
20171 _("bad immediate value for offset (%ld)"), (long) value);
20172 value /= 4;
20173
20174 newval = md_chars_to_number (buf+2, THUMB_SIZE);
20175 newval |= value;
20176 md_number_to_chars (buf+2, newval, THUMB_SIZE);
20177 break;
20178
20179 case BFD_RELOC_ARM_T32_OFFSET_IMM:
20180 /* This is a complicated relocation used for all varieties of Thumb32
20181 load/store instruction with immediate offset:
20182
20183 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20184 *4, optional writeback(W)
20185 (doubleword load/store)
20186
20187 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20188 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20189 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20190 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20191 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20192
20193 Uppercase letters indicate bits that are already encoded at
20194 this point. Lowercase letters are our problem. For the
20195 second block of instructions, the secondary opcode nybble
20196 (bits 8..11) is present, and bit 23 is zero, even if this is
20197 a PC-relative operation. */
20198 newval = md_chars_to_number (buf, THUMB_SIZE);
20199 newval <<= 16;
20200 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
20201
20202 if ((newval & 0xf0000000) == 0xe0000000)
20203 {
20204 /* Doubleword load/store: 8-bit offset, scaled by 4. */
20205 if (value >= 0)
20206 newval |= (1 << 23);
20207 else
20208 value = -value;
20209 if (value % 4 != 0)
20210 {
20211 as_bad_where (fixP->fx_file, fixP->fx_line,
20212 _("offset not a multiple of 4"));
20213 break;
20214 }
20215 value /= 4;
20216 if (value > 0xff)
20217 {
20218 as_bad_where (fixP->fx_file, fixP->fx_line,
20219 _("offset out of range"));
20220 break;
20221 }
20222 newval &= ~0xff;
20223 }
20224 else if ((newval & 0x000f0000) == 0x000f0000)
20225 {
20226 /* PC-relative, 12-bit offset. */
20227 if (value >= 0)
20228 newval |= (1 << 23);
20229 else
20230 value = -value;
20231 if (value > 0xfff)
20232 {
20233 as_bad_where (fixP->fx_file, fixP->fx_line,
20234 _("offset out of range"));
20235 break;
20236 }
20237 newval &= ~0xfff;
20238 }
20239 else if ((newval & 0x00000100) == 0x00000100)
20240 {
20241 /* Writeback: 8-bit, +/- offset. */
20242 if (value >= 0)
20243 newval |= (1 << 9);
20244 else
20245 value = -value;
20246 if (value > 0xff)
20247 {
20248 as_bad_where (fixP->fx_file, fixP->fx_line,
20249 _("offset out of range"));
20250 break;
20251 }
20252 newval &= ~0xff;
20253 }
20254 else if ((newval & 0x00000f00) == 0x00000e00)
20255 {
20256 /* T-instruction: positive 8-bit offset. */
20257 if (value < 0 || value > 0xff)
20258 {
20259 as_bad_where (fixP->fx_file, fixP->fx_line,
20260 _("offset out of range"));
20261 break;
20262 }
20263 newval &= ~0xff;
20264 newval |= value;
20265 }
20266 else
20267 {
20268 /* Positive 12-bit or negative 8-bit offset. */
20269 int limit;
20270 if (value >= 0)
20271 {
20272 newval |= (1 << 23);
20273 limit = 0xfff;
20274 }
20275 else
20276 {
20277 value = -value;
20278 limit = 0xff;
20279 }
20280 if (value > limit)
20281 {
20282 as_bad_where (fixP->fx_file, fixP->fx_line,
20283 _("offset out of range"));
20284 break;
20285 }
20286 newval &= ~limit;
20287 }
20288
20289 newval |= value;
20290 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20291 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20292 break;
20293
20294 case BFD_RELOC_ARM_SHIFT_IMM:
20295 newval = md_chars_to_number (buf, INSN_SIZE);
20296 if (((unsigned long) value) > 32
20297 || (value == 32
20298 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20299 {
20300 as_bad_where (fixP->fx_file, fixP->fx_line,
20301 _("shift expression is too large"));
20302 break;
20303 }
20304
20305 if (value == 0)
20306 /* Shifts of zero must be done as lsl. */
20307 newval &= ~0x60;
20308 else if (value == 32)
20309 value = 0;
20310 newval &= 0xfffff07f;
20311 newval |= (value & 0x1f) << 7;
20312 md_number_to_chars (buf, newval, INSN_SIZE);
20313 break;
20314
20315 case BFD_RELOC_ARM_T32_IMMEDIATE:
20316 case BFD_RELOC_ARM_T32_ADD_IMM:
20317 case BFD_RELOC_ARM_T32_IMM12:
20318 case BFD_RELOC_ARM_T32_ADD_PC12:
20319 /* We claim that this fixup has been processed here,
20320 even if in fact we generate an error because we do
20321 not have a reloc for it, so tc_gen_reloc will reject it. */
20322 fixP->fx_done = 1;
20323
20324 if (fixP->fx_addsy
20325 && ! S_IS_DEFINED (fixP->fx_addsy))
20326 {
20327 as_bad_where (fixP->fx_file, fixP->fx_line,
20328 _("undefined symbol %s used as an immediate value"),
20329 S_GET_NAME (fixP->fx_addsy));
20330 break;
20331 }
20332
20333 newval = md_chars_to_number (buf, THUMB_SIZE);
20334 newval <<= 16;
20335 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20336
20337 newimm = FAIL;
20338 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20339 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20340 {
20341 newimm = encode_thumb32_immediate (value);
20342 if (newimm == (unsigned int) FAIL)
20343 newimm = thumb32_negate_data_op (&newval, value);
20344 }
20345 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20346 && newimm == (unsigned int) FAIL)
20347 {
20348 /* Turn add/sum into addw/subw. */
20349 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20350 newval = (newval & 0xfeffffff) | 0x02000000;
20351 /* No flat 12-bit imm encoding for addsw/subsw. */
20352 if ((newval & 0x00100000) == 0)
20353 {
20354 /* 12 bit immediate for addw/subw. */
20355 if (value < 0)
20356 {
20357 value = -value;
20358 newval ^= 0x00a00000;
20359 }
20360 if (value > 0xfff)
20361 newimm = (unsigned int) FAIL;
20362 else
20363 newimm = value;
20364 }
20365 }
20366
20367 if (newimm == (unsigned int)FAIL)
20368 {
20369 as_bad_where (fixP->fx_file, fixP->fx_line,
20370 _("invalid constant (%lx) after fixup"),
20371 (unsigned long) value);
20372 break;
20373 }
20374
20375 newval |= (newimm & 0x800) << 15;
20376 newval |= (newimm & 0x700) << 4;
20377 newval |= (newimm & 0x0ff);
20378
20379 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20380 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20381 break;
20382
20383 case BFD_RELOC_ARM_SMC:
20384 if (((unsigned long) value) > 0xffff)
20385 as_bad_where (fixP->fx_file, fixP->fx_line,
20386 _("invalid smc expression"));
20387 newval = md_chars_to_number (buf, INSN_SIZE);
20388 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20389 md_number_to_chars (buf, newval, INSN_SIZE);
20390 break;
20391
20392 case BFD_RELOC_ARM_SWI:
20393 if (fixP->tc_fix_data != 0)
20394 {
20395 if (((unsigned long) value) > 0xff)
20396 as_bad_where (fixP->fx_file, fixP->fx_line,
20397 _("invalid swi expression"));
20398 newval = md_chars_to_number (buf, THUMB_SIZE);
20399 newval |= value;
20400 md_number_to_chars (buf, newval, THUMB_SIZE);
20401 }
20402 else
20403 {
20404 if (((unsigned long) value) > 0x00ffffff)
20405 as_bad_where (fixP->fx_file, fixP->fx_line,
20406 _("invalid swi expression"));
20407 newval = md_chars_to_number (buf, INSN_SIZE);
20408 newval |= value;
20409 md_number_to_chars (buf, newval, INSN_SIZE);
20410 }
20411 break;
20412
20413 case BFD_RELOC_ARM_MULTI:
20414 if (((unsigned long) value) > 0xffff)
20415 as_bad_where (fixP->fx_file, fixP->fx_line,
20416 _("invalid expression in load/store multiple"));
20417 newval = value | md_chars_to_number (buf, INSN_SIZE);
20418 md_number_to_chars (buf, newval, INSN_SIZE);
20419 break;
20420
20421 #ifdef OBJ_ELF
20422 case BFD_RELOC_ARM_PCREL_CALL:
20423
20424 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20425 && fixP->fx_addsy
20426 && !S_IS_EXTERNAL (fixP->fx_addsy)
20427 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20428 && THUMB_IS_FUNC (fixP->fx_addsy))
20429 /* Flip the bl to blx. This is a simple flip
20430 bit here because we generate PCREL_CALL for
20431 unconditional bls. */
20432 {
20433 newval = md_chars_to_number (buf, INSN_SIZE);
20434 newval = newval | 0x10000000;
20435 md_number_to_chars (buf, newval, INSN_SIZE);
20436 temp = 1;
20437 fixP->fx_done = 1;
20438 }
20439 else
20440 temp = 3;
20441 goto arm_branch_common;
20442
20443 case BFD_RELOC_ARM_PCREL_JUMP:
20444 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20445 && fixP->fx_addsy
20446 && !S_IS_EXTERNAL (fixP->fx_addsy)
20447 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20448 && THUMB_IS_FUNC (fixP->fx_addsy))
20449 {
20450 /* This would map to a bl<cond>, b<cond>,
20451 b<always> to a Thumb function. We
20452 need to force a relocation for this particular
20453 case. */
20454 newval = md_chars_to_number (buf, INSN_SIZE);
20455 fixP->fx_done = 0;
20456 }
20457
20458 case BFD_RELOC_ARM_PLT32:
20459 #endif
20460 case BFD_RELOC_ARM_PCREL_BRANCH:
20461 temp = 3;
20462 goto arm_branch_common;
20463
20464 case BFD_RELOC_ARM_PCREL_BLX:
20465
20466 temp = 1;
20467 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20468 && fixP->fx_addsy
20469 && !S_IS_EXTERNAL (fixP->fx_addsy)
20470 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20471 && ARM_IS_FUNC (fixP->fx_addsy))
20472 {
20473 /* Flip the blx to a bl and warn. */
20474 const char *name = S_GET_NAME (fixP->fx_addsy);
20475 newval = 0xeb000000;
20476 as_warn_where (fixP->fx_file, fixP->fx_line,
20477 _("blx to '%s' an ARM ISA state function changed to bl"),
20478 name);
20479 md_number_to_chars (buf, newval, INSN_SIZE);
20480 temp = 3;
20481 fixP->fx_done = 1;
20482 }
20483
20484 #ifdef OBJ_ELF
20485 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20486 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20487 #endif
20488
20489 arm_branch_common:
20490 /* We are going to store value (shifted right by two) in the
20491 instruction, in a 24 bit, signed field. Bits 26 through 32 either
20492 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
20493 also be be clear. */
20494 if (value & temp)
20495 as_bad_where (fixP->fx_file, fixP->fx_line,
20496 _("misaligned branch destination"));
20497 if ((value & (offsetT)0xfe000000) != (offsetT)0
20498 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
20499 as_bad_where (fixP->fx_file, fixP->fx_line,
20500 _("branch out of range"));
20501
20502 if (fixP->fx_done || !seg->use_rela_p)
20503 {
20504 newval = md_chars_to_number (buf, INSN_SIZE);
20505 newval |= (value >> 2) & 0x00ffffff;
20506 /* Set the H bit on BLX instructions. */
20507 if (temp == 1)
20508 {
20509 if (value & 2)
20510 newval |= 0x01000000;
20511 else
20512 newval &= ~0x01000000;
20513 }
20514 md_number_to_chars (buf, newval, INSN_SIZE);
20515 }
20516 break;
20517
20518 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
20519 /* CBZ can only branch forward. */
20520
20521 /* Attempts to use CBZ to branch to the next instruction
20522 (which, strictly speaking, are prohibited) will be turned into
20523 no-ops.
20524
20525 FIXME: It may be better to remove the instruction completely and
20526 perform relaxation. */
20527 if (value == -2)
20528 {
20529 newval = md_chars_to_number (buf, THUMB_SIZE);
20530 newval = 0xbf00; /* NOP encoding T1 */
20531 md_number_to_chars (buf, newval, THUMB_SIZE);
20532 }
20533 else
20534 {
20535 if (value & ~0x7e)
20536 as_bad_where (fixP->fx_file, fixP->fx_line,
20537 _("branch out of range"));
20538
20539 if (fixP->fx_done || !seg->use_rela_p)
20540 {
20541 newval = md_chars_to_number (buf, THUMB_SIZE);
20542 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
20543 md_number_to_chars (buf, newval, THUMB_SIZE);
20544 }
20545 }
20546 break;
20547
20548 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
20549 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
20550 as_bad_where (fixP->fx_file, fixP->fx_line,
20551 _("branch out of range"));
20552
20553 if (fixP->fx_done || !seg->use_rela_p)
20554 {
20555 newval = md_chars_to_number (buf, THUMB_SIZE);
20556 newval |= (value & 0x1ff) >> 1;
20557 md_number_to_chars (buf, newval, THUMB_SIZE);
20558 }
20559 break;
20560
20561 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
20562 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
20563 as_bad_where (fixP->fx_file, fixP->fx_line,
20564 _("branch out of range"));
20565
20566 if (fixP->fx_done || !seg->use_rela_p)
20567 {
20568 newval = md_chars_to_number (buf, THUMB_SIZE);
20569 newval |= (value & 0xfff) >> 1;
20570 md_number_to_chars (buf, newval, THUMB_SIZE);
20571 }
20572 break;
20573
20574 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20575 if (fixP->fx_addsy
20576 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20577 && !S_IS_EXTERNAL (fixP->fx_addsy)
20578 && S_IS_DEFINED (fixP->fx_addsy)
20579 && ARM_IS_FUNC (fixP->fx_addsy)
20580 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20581 {
20582 /* Force a relocation for a branch 20 bits wide. */
20583 fixP->fx_done = 0;
20584 }
20585 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
20586 as_bad_where (fixP->fx_file, fixP->fx_line,
20587 _("conditional branch out of range"));
20588
20589 if (fixP->fx_done || !seg->use_rela_p)
20590 {
20591 offsetT newval2;
20592 addressT S, J1, J2, lo, hi;
20593
20594 S = (value & 0x00100000) >> 20;
20595 J2 = (value & 0x00080000) >> 19;
20596 J1 = (value & 0x00040000) >> 18;
20597 hi = (value & 0x0003f000) >> 12;
20598 lo = (value & 0x00000ffe) >> 1;
20599
20600 newval = md_chars_to_number (buf, THUMB_SIZE);
20601 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20602 newval |= (S << 10) | hi;
20603 newval2 |= (J1 << 13) | (J2 << 11) | lo;
20604 md_number_to_chars (buf, newval, THUMB_SIZE);
20605 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20606 }
20607 break;
20608
20609 case BFD_RELOC_THUMB_PCREL_BLX:
20610
20611 /* If there is a blx from a thumb state function to
20612 another thumb function flip this to a bl and warn
20613 about it. */
20614
20615 if (fixP->fx_addsy
20616 && S_IS_DEFINED (fixP->fx_addsy)
20617 && !S_IS_EXTERNAL (fixP->fx_addsy)
20618 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20619 && THUMB_IS_FUNC (fixP->fx_addsy))
20620 {
20621 const char *name = S_GET_NAME (fixP->fx_addsy);
20622 as_warn_where (fixP->fx_file, fixP->fx_line,
20623 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20624 name);
20625 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20626 newval = newval | 0x1000;
20627 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20628 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20629 fixP->fx_done = 1;
20630 }
20631
20632
20633 goto thumb_bl_common;
20634
20635 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20636
20637 /* A bl from Thumb state ISA to an internal ARM state function
20638 is converted to a blx. */
20639 if (fixP->fx_addsy
20640 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20641 && !S_IS_EXTERNAL (fixP->fx_addsy)
20642 && S_IS_DEFINED (fixP->fx_addsy)
20643 && ARM_IS_FUNC (fixP->fx_addsy)
20644 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20645 {
20646 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20647 newval = newval & ~0x1000;
20648 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20649 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
20650 fixP->fx_done = 1;
20651 }
20652
20653 thumb_bl_common:
20654
20655 #ifdef OBJ_ELF
20656 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
20657 fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20658 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20659 #endif
20660
20661 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20662 /* For a BLX instruction, make sure that the relocation is rounded up
20663 to a word boundary. This follows the semantics of the instruction
20664 which specifies that bit 1 of the target address will come from bit
20665 1 of the base address. */
20666 value = (value + 1) & ~ 1;
20667
20668
20669 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
20670 {
20671 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
20672 {
20673 as_bad_where (fixP->fx_file, fixP->fx_line,
20674 _("branch out of range"));
20675 }
20676 else if ((value & ~0x1ffffff)
20677 && ((value & ~0x1ffffff) != ~0x1ffffff))
20678 {
20679 as_bad_where (fixP->fx_file, fixP->fx_line,
20680 _("Thumb2 branch out of range"));
20681 }
20682 }
20683
20684 if (fixP->fx_done || !seg->use_rela_p)
20685 encode_thumb2_b_bl_offset (buf, value);
20686
20687 break;
20688
20689 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20690 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
20691 as_bad_where (fixP->fx_file, fixP->fx_line,
20692 _("branch out of range"));
20693
20694 if (fixP->fx_done || !seg->use_rela_p)
20695 encode_thumb2_b_bl_offset (buf, value);
20696
20697 break;
20698
20699 case BFD_RELOC_8:
20700 if (fixP->fx_done || !seg->use_rela_p)
20701 md_number_to_chars (buf, value, 1);
20702 break;
20703
20704 case BFD_RELOC_16:
20705 if (fixP->fx_done || !seg->use_rela_p)
20706 md_number_to_chars (buf, value, 2);
20707 break;
20708
20709 #ifdef OBJ_ELF
20710 case BFD_RELOC_ARM_TLS_GD32:
20711 case BFD_RELOC_ARM_TLS_LE32:
20712 case BFD_RELOC_ARM_TLS_IE32:
20713 case BFD_RELOC_ARM_TLS_LDM32:
20714 case BFD_RELOC_ARM_TLS_LDO32:
20715 S_SET_THREAD_LOCAL (fixP->fx_addsy);
20716 /* fall through */
20717
20718 case BFD_RELOC_ARM_GOT32:
20719 case BFD_RELOC_ARM_GOTOFF:
20720 if (fixP->fx_done || !seg->use_rela_p)
20721 md_number_to_chars (buf, 0, 4);
20722 break;
20723
20724 case BFD_RELOC_ARM_GOT_PREL:
20725 if (fixP->fx_done || !seg->use_rela_p)
20726 md_number_to_chars (buf, value, 4);
20727 break;
20728
20729 case BFD_RELOC_ARM_TARGET2:
20730 /* TARGET2 is not partial-inplace, so we need to write the
20731 addend here for REL targets, because it won't be written out
20732 during reloc processing later. */
20733 if (fixP->fx_done || !seg->use_rela_p)
20734 md_number_to_chars (buf, fixP->fx_offset, 4);
20735 break;
20736 #endif
20737
20738 case BFD_RELOC_RVA:
20739 case BFD_RELOC_32:
20740 case BFD_RELOC_ARM_TARGET1:
20741 case BFD_RELOC_ARM_ROSEGREL32:
20742 case BFD_RELOC_ARM_SBREL32:
20743 case BFD_RELOC_32_PCREL:
20744 #ifdef TE_PE
20745 case BFD_RELOC_32_SECREL:
20746 #endif
20747 if (fixP->fx_done || !seg->use_rela_p)
20748 #ifdef TE_WINCE
20749 /* For WinCE we only do this for pcrel fixups. */
20750 if (fixP->fx_done || fixP->fx_pcrel)
20751 #endif
20752 md_number_to_chars (buf, value, 4);
20753 break;
20754
20755 #ifdef OBJ_ELF
20756 case BFD_RELOC_ARM_PREL31:
20757 if (fixP->fx_done || !seg->use_rela_p)
20758 {
20759 newval = md_chars_to_number (buf, 4) & 0x80000000;
20760 if ((value ^ (value >> 1)) & 0x40000000)
20761 {
20762 as_bad_where (fixP->fx_file, fixP->fx_line,
20763 _("rel31 relocation overflow"));
20764 }
20765 newval |= value & 0x7fffffff;
20766 md_number_to_chars (buf, newval, 4);
20767 }
20768 break;
20769 #endif
20770
20771 case BFD_RELOC_ARM_CP_OFF_IMM:
20772 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20773 if (value < -1023 || value > 1023 || (value & 3))
20774 as_bad_where (fixP->fx_file, fixP->fx_line,
20775 _("co-processor offset out of range"));
20776 cp_off_common:
20777 sign = value >= 0;
20778 if (value < 0)
20779 value = -value;
20780 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20781 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20782 newval = md_chars_to_number (buf, INSN_SIZE);
20783 else
20784 newval = get_thumb32_insn (buf);
20785 newval &= 0xff7fff00;
20786 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
20787 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20788 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20789 md_number_to_chars (buf, newval, INSN_SIZE);
20790 else
20791 put_thumb32_insn (buf, newval);
20792 break;
20793
20794 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
20795 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
20796 if (value < -255 || value > 255)
20797 as_bad_where (fixP->fx_file, fixP->fx_line,
20798 _("co-processor offset out of range"));
20799 value *= 4;
20800 goto cp_off_common;
20801
20802 case BFD_RELOC_ARM_THUMB_OFFSET:
20803 newval = md_chars_to_number (buf, THUMB_SIZE);
20804 /* Exactly what ranges, and where the offset is inserted depends
20805 on the type of instruction, we can establish this from the
20806 top 4 bits. */
20807 switch (newval >> 12)
20808 {
20809 case 4: /* PC load. */
20810 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20811 forced to zero for these loads; md_pcrel_from has already
20812 compensated for this. */
20813 if (value & 3)
20814 as_bad_where (fixP->fx_file, fixP->fx_line,
20815 _("invalid offset, target not word aligned (0x%08lX)"),
20816 (((unsigned long) fixP->fx_frag->fr_address
20817 + (unsigned long) fixP->fx_where) & ~3)
20818 + (unsigned long) value);
20819
20820 if (value & ~0x3fc)
20821 as_bad_where (fixP->fx_file, fixP->fx_line,
20822 _("invalid offset, value too big (0x%08lX)"),
20823 (long) value);
20824
20825 newval |= value >> 2;
20826 break;
20827
20828 case 9: /* SP load/store. */
20829 if (value & ~0x3fc)
20830 as_bad_where (fixP->fx_file, fixP->fx_line,
20831 _("invalid offset, value too big (0x%08lX)"),
20832 (long) value);
20833 newval |= value >> 2;
20834 break;
20835
20836 case 6: /* Word load/store. */
20837 if (value & ~0x7c)
20838 as_bad_where (fixP->fx_file, fixP->fx_line,
20839 _("invalid offset, value too big (0x%08lX)"),
20840 (long) value);
20841 newval |= value << 4; /* 6 - 2. */
20842 break;
20843
20844 case 7: /* Byte load/store. */
20845 if (value & ~0x1f)
20846 as_bad_where (fixP->fx_file, fixP->fx_line,
20847 _("invalid offset, value too big (0x%08lX)"),
20848 (long) value);
20849 newval |= value << 6;
20850 break;
20851
20852 case 8: /* Halfword load/store. */
20853 if (value & ~0x3e)
20854 as_bad_where (fixP->fx_file, fixP->fx_line,
20855 _("invalid offset, value too big (0x%08lX)"),
20856 (long) value);
20857 newval |= value << 5; /* 6 - 1. */
20858 break;
20859
20860 default:
20861 as_bad_where (fixP->fx_file, fixP->fx_line,
20862 "Unable to process relocation for thumb opcode: %lx",
20863 (unsigned long) newval);
20864 break;
20865 }
20866 md_number_to_chars (buf, newval, THUMB_SIZE);
20867 break;
20868
20869 case BFD_RELOC_ARM_THUMB_ADD:
20870 /* This is a complicated relocation, since we use it for all of
20871 the following immediate relocations:
20872
20873 3bit ADD/SUB
20874 8bit ADD/SUB
20875 9bit ADD/SUB SP word-aligned
20876 10bit ADD PC/SP word-aligned
20877
20878 The type of instruction being processed is encoded in the
20879 instruction field:
20880
20881 0x8000 SUB
20882 0x00F0 Rd
20883 0x000F Rs
20884 */
20885 newval = md_chars_to_number (buf, THUMB_SIZE);
20886 {
20887 int rd = (newval >> 4) & 0xf;
20888 int rs = newval & 0xf;
20889 int subtract = !!(newval & 0x8000);
20890
20891 /* Check for HI regs, only very restricted cases allowed:
20892 Adjusting SP, and using PC or SP to get an address. */
20893 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
20894 || (rs > 7 && rs != REG_SP && rs != REG_PC))
20895 as_bad_where (fixP->fx_file, fixP->fx_line,
20896 _("invalid Hi register with immediate"));
20897
20898 /* If value is negative, choose the opposite instruction. */
20899 if (value < 0)
20900 {
20901 value = -value;
20902 subtract = !subtract;
20903 if (value < 0)
20904 as_bad_where (fixP->fx_file, fixP->fx_line,
20905 _("immediate value out of range"));
20906 }
20907
20908 if (rd == REG_SP)
20909 {
20910 if (value & ~0x1fc)
20911 as_bad_where (fixP->fx_file, fixP->fx_line,
20912 _("invalid immediate for stack address calculation"));
20913 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
20914 newval |= value >> 2;
20915 }
20916 else if (rs == REG_PC || rs == REG_SP)
20917 {
20918 if (subtract || value & ~0x3fc)
20919 as_bad_where (fixP->fx_file, fixP->fx_line,
20920 _("invalid immediate for address calculation (value = 0x%08lX)"),
20921 (unsigned long) value);
20922 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
20923 newval |= rd << 8;
20924 newval |= value >> 2;
20925 }
20926 else if (rs == rd)
20927 {
20928 if (value & ~0xff)
20929 as_bad_where (fixP->fx_file, fixP->fx_line,
20930 _("immediate value out of range"));
20931 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
20932 newval |= (rd << 8) | value;
20933 }
20934 else
20935 {
20936 if (value & ~0x7)
20937 as_bad_where (fixP->fx_file, fixP->fx_line,
20938 _("immediate value out of range"));
20939 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
20940 newval |= rd | (rs << 3) | (value << 6);
20941 }
20942 }
20943 md_number_to_chars (buf, newval, THUMB_SIZE);
20944 break;
20945
20946 case BFD_RELOC_ARM_THUMB_IMM:
20947 newval = md_chars_to_number (buf, THUMB_SIZE);
20948 if (value < 0 || value > 255)
20949 as_bad_where (fixP->fx_file, fixP->fx_line,
20950 _("invalid immediate: %ld is out of range"),
20951 (long) value);
20952 newval |= value;
20953 md_number_to_chars (buf, newval, THUMB_SIZE);
20954 break;
20955
20956 case BFD_RELOC_ARM_THUMB_SHIFT:
20957 /* 5bit shift value (0..32). LSL cannot take 32. */
20958 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
20959 temp = newval & 0xf800;
20960 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
20961 as_bad_where (fixP->fx_file, fixP->fx_line,
20962 _("invalid shift value: %ld"), (long) value);
20963 /* Shifts of zero must be encoded as LSL. */
20964 if (value == 0)
20965 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
20966 /* Shifts of 32 are encoded as zero. */
20967 else if (value == 32)
20968 value = 0;
20969 newval |= value << 6;
20970 md_number_to_chars (buf, newval, THUMB_SIZE);
20971 break;
20972
20973 case BFD_RELOC_VTABLE_INHERIT:
20974 case BFD_RELOC_VTABLE_ENTRY:
20975 fixP->fx_done = 0;
20976 return;
20977
20978 case BFD_RELOC_ARM_MOVW:
20979 case BFD_RELOC_ARM_MOVT:
20980 case BFD_RELOC_ARM_THUMB_MOVW:
20981 case BFD_RELOC_ARM_THUMB_MOVT:
20982 if (fixP->fx_done || !seg->use_rela_p)
20983 {
20984 /* REL format relocations are limited to a 16-bit addend. */
20985 if (!fixP->fx_done)
20986 {
20987 if (value < -0x8000 || value > 0x7fff)
20988 as_bad_where (fixP->fx_file, fixP->fx_line,
20989 _("offset out of range"));
20990 }
20991 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20992 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20993 {
20994 value >>= 16;
20995 }
20996
20997 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20998 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20999 {
21000 newval = get_thumb32_insn (buf);
21001 newval &= 0xfbf08f00;
21002 newval |= (value & 0xf000) << 4;
21003 newval |= (value & 0x0800) << 15;
21004 newval |= (value & 0x0700) << 4;
21005 newval |= (value & 0x00ff);
21006 put_thumb32_insn (buf, newval);
21007 }
21008 else
21009 {
21010 newval = md_chars_to_number (buf, 4);
21011 newval &= 0xfff0f000;
21012 newval |= value & 0x0fff;
21013 newval |= (value & 0xf000) << 4;
21014 md_number_to_chars (buf, newval, 4);
21015 }
21016 }
21017 return;
21018
21019 case BFD_RELOC_ARM_ALU_PC_G0_NC:
21020 case BFD_RELOC_ARM_ALU_PC_G0:
21021 case BFD_RELOC_ARM_ALU_PC_G1_NC:
21022 case BFD_RELOC_ARM_ALU_PC_G1:
21023 case BFD_RELOC_ARM_ALU_PC_G2:
21024 case BFD_RELOC_ARM_ALU_SB_G0_NC:
21025 case BFD_RELOC_ARM_ALU_SB_G0:
21026 case BFD_RELOC_ARM_ALU_SB_G1_NC:
21027 case BFD_RELOC_ARM_ALU_SB_G1:
21028 case BFD_RELOC_ARM_ALU_SB_G2:
21029 gas_assert (!fixP->fx_done);
21030 if (!seg->use_rela_p)
21031 {
21032 bfd_vma insn;
21033 bfd_vma encoded_addend;
21034 bfd_vma addend_abs = abs (value);
21035
21036 /* Check that the absolute value of the addend can be
21037 expressed as an 8-bit constant plus a rotation. */
21038 encoded_addend = encode_arm_immediate (addend_abs);
21039 if (encoded_addend == (unsigned int) FAIL)
21040 as_bad_where (fixP->fx_file, fixP->fx_line,
21041 _("the offset 0x%08lX is not representable"),
21042 (unsigned long) addend_abs);
21043
21044 /* Extract the instruction. */
21045 insn = md_chars_to_number (buf, INSN_SIZE);
21046
21047 /* If the addend is positive, use an ADD instruction.
21048 Otherwise use a SUB. Take care not to destroy the S bit. */
21049 insn &= 0xff1fffff;
21050 if (value < 0)
21051 insn |= 1 << 22;
21052 else
21053 insn |= 1 << 23;
21054
21055 /* Place the encoded addend into the first 12 bits of the
21056 instruction. */
21057 insn &= 0xfffff000;
21058 insn |= encoded_addend;
21059
21060 /* Update the instruction. */
21061 md_number_to_chars (buf, insn, INSN_SIZE);
21062 }
21063 break;
21064
21065 case BFD_RELOC_ARM_LDR_PC_G0:
21066 case BFD_RELOC_ARM_LDR_PC_G1:
21067 case BFD_RELOC_ARM_LDR_PC_G2:
21068 case BFD_RELOC_ARM_LDR_SB_G0:
21069 case BFD_RELOC_ARM_LDR_SB_G1:
21070 case BFD_RELOC_ARM_LDR_SB_G2:
21071 gas_assert (!fixP->fx_done);
21072 if (!seg->use_rela_p)
21073 {
21074 bfd_vma insn;
21075 bfd_vma addend_abs = abs (value);
21076
21077 /* Check that the absolute value of the addend can be
21078 encoded in 12 bits. */
21079 if (addend_abs >= 0x1000)
21080 as_bad_where (fixP->fx_file, fixP->fx_line,
21081 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21082 (unsigned long) addend_abs);
21083
21084 /* Extract the instruction. */
21085 insn = md_chars_to_number (buf, INSN_SIZE);
21086
21087 /* If the addend is negative, clear bit 23 of the instruction.
21088 Otherwise set it. */
21089 if (value < 0)
21090 insn &= ~(1 << 23);
21091 else
21092 insn |= 1 << 23;
21093
21094 /* Place the absolute value of the addend into the first 12 bits
21095 of the instruction. */
21096 insn &= 0xfffff000;
21097 insn |= addend_abs;
21098
21099 /* Update the instruction. */
21100 md_number_to_chars (buf, insn, INSN_SIZE);
21101 }
21102 break;
21103
21104 case BFD_RELOC_ARM_LDRS_PC_G0:
21105 case BFD_RELOC_ARM_LDRS_PC_G1:
21106 case BFD_RELOC_ARM_LDRS_PC_G2:
21107 case BFD_RELOC_ARM_LDRS_SB_G0:
21108 case BFD_RELOC_ARM_LDRS_SB_G1:
21109 case BFD_RELOC_ARM_LDRS_SB_G2:
21110 gas_assert (!fixP->fx_done);
21111 if (!seg->use_rela_p)
21112 {
21113 bfd_vma insn;
21114 bfd_vma addend_abs = abs (value);
21115
21116 /* Check that the absolute value of the addend can be
21117 encoded in 8 bits. */
21118 if (addend_abs >= 0x100)
21119 as_bad_where (fixP->fx_file, fixP->fx_line,
21120 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21121 (unsigned long) addend_abs);
21122
21123 /* Extract the instruction. */
21124 insn = md_chars_to_number (buf, INSN_SIZE);
21125
21126 /* If the addend is negative, clear bit 23 of the instruction.
21127 Otherwise set it. */
21128 if (value < 0)
21129 insn &= ~(1 << 23);
21130 else
21131 insn |= 1 << 23;
21132
21133 /* Place the first four bits of the absolute value of the addend
21134 into the first 4 bits of the instruction, and the remaining
21135 four into bits 8 .. 11. */
21136 insn &= 0xfffff0f0;
21137 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21138
21139 /* Update the instruction. */
21140 md_number_to_chars (buf, insn, INSN_SIZE);
21141 }
21142 break;
21143
21144 case BFD_RELOC_ARM_LDC_PC_G0:
21145 case BFD_RELOC_ARM_LDC_PC_G1:
21146 case BFD_RELOC_ARM_LDC_PC_G2:
21147 case BFD_RELOC_ARM_LDC_SB_G0:
21148 case BFD_RELOC_ARM_LDC_SB_G1:
21149 case BFD_RELOC_ARM_LDC_SB_G2:
21150 gas_assert (!fixP->fx_done);
21151 if (!seg->use_rela_p)
21152 {
21153 bfd_vma insn;
21154 bfd_vma addend_abs = abs (value);
21155
21156 /* Check that the absolute value of the addend is a multiple of
21157 four and, when divided by four, fits in 8 bits. */
21158 if (addend_abs & 0x3)
21159 as_bad_where (fixP->fx_file, fixP->fx_line,
21160 _("bad offset 0x%08lX (must be word-aligned)"),
21161 (unsigned long) addend_abs);
21162
21163 if ((addend_abs >> 2) > 0xff)
21164 as_bad_where (fixP->fx_file, fixP->fx_line,
21165 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21166 (unsigned long) addend_abs);
21167
21168 /* Extract the instruction. */
21169 insn = md_chars_to_number (buf, INSN_SIZE);
21170
21171 /* If the addend is negative, clear bit 23 of the instruction.
21172 Otherwise set it. */
21173 if (value < 0)
21174 insn &= ~(1 << 23);
21175 else
21176 insn |= 1 << 23;
21177
21178 /* Place the addend (divided by four) into the first eight
21179 bits of the instruction. */
21180 insn &= 0xfffffff0;
21181 insn |= addend_abs >> 2;
21182
21183 /* Update the instruction. */
21184 md_number_to_chars (buf, insn, INSN_SIZE);
21185 }
21186 break;
21187
21188 case BFD_RELOC_ARM_V4BX:
21189 /* This will need to go in the object file. */
21190 fixP->fx_done = 0;
21191 break;
21192
21193 case BFD_RELOC_UNUSED:
21194 default:
21195 as_bad_where (fixP->fx_file, fixP->fx_line,
21196 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
21197 }
21198 }
21199
/* Translate internal representation of relocation info to BFD target
   format.  Returns a freshly xmalloc'd arelent describing the fixup FIXP
   in SECTION, or NULL (after reporting an error) if the fixup cannot be
   represented in the output object format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA targets the addend carries the PC-relative adjustment;
	 for REL targets the reloc address itself is used as the offset.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
      /* The data relocations below deliberately fall through when not
	 PC-relative, eventually reaching the group that passes the type
	 straight to BFD.  */
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

      /* These relocation types map one-to-one onto BFD codes.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI version 4 on, BLX is expressed as BRANCH23 and the
	 linker performs any needed mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only reloc; should have been resolved in md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* Representable on RELA targets only; otherwise it should have been
	 fixed up locally, so reaching here is an error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any other internal relocation type escaping to this point is a
	   bug; name it in the diagnostic to aid debugging.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
21453
21454 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21455
21456 void
21457 cons_fix_new_arm (fragS * frag,
21458 int where,
21459 int size,
21460 expressionS * exp)
21461 {
21462 bfd_reloc_code_real_type type;
21463 int pcrel = 0;
21464
21465 /* Pick a reloc.
21466 FIXME: @@ Should look at CPU word size. */
21467 switch (size)
21468 {
21469 case 1:
21470 type = BFD_RELOC_8;
21471 break;
21472 case 2:
21473 type = BFD_RELOC_16;
21474 break;
21475 case 4:
21476 default:
21477 type = BFD_RELOC_32;
21478 break;
21479 case 8:
21480 type = BFD_RELOC_64;
21481 break;
21482 }
21483
21484 #ifdef TE_PE
21485 if (exp->X_op == O_secrel)
21486 {
21487 exp->X_op = O_symbol;
21488 type = BFD_RELOC_32_SECREL;
21489 }
21490 #endif
21491
21492 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
21493 }
21494
21495 #if defined (OBJ_COFF)
21496 void
21497 arm_validate_fix (fixS * fixP)
21498 {
21499 /* If the destination of the branch is a defined symbol which does not have
21500 the THUMB_FUNC attribute, then we must be calling a function which has
21501 the (interfacearm) attribute. We look for the Thumb entry point to that
21502 function and change the branch to refer to that function instead. */
21503 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
21504 && fixP->fx_addsy != NULL
21505 && S_IS_DEFINED (fixP->fx_addsy)
21506 && ! THUMB_IS_FUNC (fixP->fx_addsy))
21507 {
21508 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
21509 }
21510 }
21511 #endif
21512
21513
21514 int
21515 arm_force_relocation (struct fix * fixp)
21516 {
21517 #if defined (OBJ_COFF) && defined (TE_PE)
21518 if (fixp->fx_r_type == BFD_RELOC_RVA)
21519 return 1;
21520 #endif
21521
21522 /* In case we have a call or a branch to a function in ARM ISA mode from
21523 a thumb function or vice-versa force the relocation. These relocations
21524 are cleared off for some cores that might have blx and simple transformations
21525 are possible. */
21526
21527 #ifdef OBJ_ELF
21528 switch (fixp->fx_r_type)
21529 {
21530 case BFD_RELOC_ARM_PCREL_JUMP:
21531 case BFD_RELOC_ARM_PCREL_CALL:
21532 case BFD_RELOC_THUMB_PCREL_BLX:
21533 if (THUMB_IS_FUNC (fixp->fx_addsy))
21534 return 1;
21535 break;
21536
21537 case BFD_RELOC_ARM_PCREL_BLX:
21538 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21539 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21540 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21541 if (ARM_IS_FUNC (fixp->fx_addsy))
21542 return 1;
21543 break;
21544
21545 default:
21546 break;
21547 }
21548 #endif
21549
21550 /* Resolve these relocations even if the symbol is extern or weak. */
21551 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
21552 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
21553 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
21554 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
21555 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21556 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
21557 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
21558 return 0;
21559
21560 /* Always leave these relocations for the linker. */
21561 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21562 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21563 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21564 return 1;
21565
21566 /* Always generate relocations against function symbols. */
21567 if (fixp->fx_r_type == BFD_RELOC_32
21568 && fixp->fx_addsy
21569 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
21570 return 1;
21571
21572 return generic_force_reloc (fixp);
21573 }
21574
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return TRUE if the fixup FIXP may be adjusted to be relative to a
   section symbol, FALSE if the original symbol must be preserved in
   the relocation.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* No symbol at all: nothing to preserve.  */
  if (fixP->fx_addsy == NULL)
    return TRUE;	/* Was "return 1" -- use TRUE for consistency
			   with the other bfd_boolean returns below.  */

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Thumb functions must stay named so interworking stubs can be made.  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.	 */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21640
21641 #ifdef OBJ_ELF
21642
21643 const char *
21644 elf32_arm_target_format (void)
21645 {
21646 #ifdef TE_SYMBIAN
21647 return (target_big_endian
21648 ? "elf32-bigarm-symbian"
21649 : "elf32-littlearm-symbian");
21650 #elif defined (TE_VXWORKS)
21651 return (target_big_endian
21652 ? "elf32-bigarm-vxworks"
21653 : "elf32-littlearm-vxworks");
21654 #else
21655 if (target_big_endian)
21656 return "elf32-bigarm";
21657 else
21658 return "elf32-littlearm";
21659 #endif
21660 }
21661
/* Target symbol-frobbing hook: defer entirely to the generic ELF
   implementation.  PUNTP is an out-parameter used by the callee.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
21668 #endif
21669
21670 /* MD interface: Finalization. */
21671
21672 void
21673 arm_cleanup (void)
21674 {
21675 literal_pool * pool;
21676
21677 /* Ensure that all the IT blocks are properly closed. */
21678 check_it_blocks_finished ();
21679
21680 for (pool = list_of_pools; pool; pool = pool->next)
21681 {
21682 /* Put it at the end of the relevant section. */
21683 subseg_set (pool->section, pool->sub_section);
21684 #ifdef OBJ_ELF
21685 arm_elf_change_section ();
21686 #endif
21687 s_ltorg (0);
21688 }
21689 }
21690
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from arm_adjust_symtab;
   ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for a section with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM lies exactly on the boundary with the following frag(s);
	 scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
21757
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is done through the symbol storage class;
   for ELF through the symbol's st_info type field.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking-capable symbols: set all bits of the COFF aux
	 n_flags field.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
21837
21838 /* MD interface: Initialization. */
21839
21840 static void
21841 set_constant_flonums (void)
21842 {
21843 int i;
21844
21845 for (i = 0; i < NUM_FLOAT_VALS; i++)
21846 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
21847 abort ();
21848 }
21849
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  (A variant without the ARM v1 base feature has no
   ARM instruction set, so Thumb is all that is left.)  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
21859
/* gas target hook: one-time assembler initialization.  Builds the
   opcode / operand hash tables, resolves the CPU and FPU selection
   from the command-line options, records object-file flags, and sets
   the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate all the lookup tables used while parsing instructions.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the tables from the static arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selected: fall back on a default appropriate to the
     CPU selection.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  The checks run from most to least
     specific feature, so the first match wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
22076
22077 /* Command line processing. */
22078
22079 /* md_parse_option
22080 Invocation line includes a switch not recognized by the base assembler.
22081 See if it's a processor-specific option.
22082
22083 This routine is somewhat complicated by the need for backwards
22084 compatibility (since older releases of gcc can't be changed).
22085 The new options try to make the interface as compatible as
22086 possible with GCC.
22087
22088 New options (supported) are:
22089
22090 -mcpu=<cpu name> Assemble for selected processor
22091 -march=<architecture name> Assemble for selected architecture
22092 -mfpu=<fpu architecture> Assemble for selected FPU.
22093 -EB/-mbig-endian Big-endian
22094 -EL/-mlittle-endian Little-endian
22095 -k Generate PIC code
22096 -mthumb Start in Thumb mode
22097 -mthumb-interwork Code supports ARM/Thumb interworking
22098
22099 -m[no-]warn-deprecated Warn about deprecated features
22100
22101 For now we will also provide support for:
22102
22103 -mapcs-32 32-bit Program counter
22104 -mapcs-26 26-bit Program counter
   -mapcs-float		   Floats passed in FP registers
22106 -mapcs-reentrant Reentrant code
22107 -matpcs
22108 (sometime these will probably be replaced with -mapcs=<list of options>
22109 and -matpcs=<list of options>)
22110
   The remaining options are only supported for backwards compatibility.
22112 Cpu variants, the arm part is optional:
22113 -m[arm]1 Currently not supported.
22114 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
22115 -m[arm]3 Arm 3 processor
22116 -m[arm]6[xx], Arm 6 processors
22117 -m[arm]7[xx][t][[d]m] Arm 7 processors
22118 -m[arm]8[10] Arm 8 processors
22119 -m[arm]9[20][tdmi] Arm 9 processors
22120 -mstrongarm[110[0]] StrongARM processors
22121 -mxscale XScale processors
22122 -m[arm]v[2345[t[e]]] Arm architectures
22123 -mall All (except the ARM1)
22124 FP variants:
22125 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
22126 -mfpe-old (No float load/store multiples)
22127 -mvfpxd VFP Single precision
22128 -mvfp All VFP
22129 -mno-fpu Disable all floating point instructions
22130
22131 The following CPU names are recognized:
22132 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22133 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22134 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
22135 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22136 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22137 arm10t arm10e, arm1020t, arm1020e, arm10200e,
22138 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22139
22140 */
22141
/* Short options: -m<arg> (machine selection) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* Endianness options are only defined where meaningful: both for a
   bi-endian assembler, otherwise only the native one.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options dispatched by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
22169
/* Table entry describing a simple command-line flag that just sets
   an integer variable to a fixed value.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
22178
/* Flag-style options handled by the generic loop in md_parse_option.  */
struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
22204
/* Table entry describing a deprecated CPU/FPU-selection option that
   maps onto an ARM feature set.  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};
22212
/* Legacy -m<cpu>/-m<arch>/-m<fpu> options.  Each entry points the
   relevant legacy_* variable at a feature set and carries the
   deprecation message suggesting the modern replacement option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
22325
/* Table entry for a -mcpu= processor name, mapping it to its
   architecture feature set and an implied default FPU.  */
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
22337
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  The leading "all" entry must stay first:
   s_arm_cpu deliberately starts its search at arm_cpus + 1.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"fa626te",		ARM_ARCH_V5TE,	 FPU_NONE,	  NULL},
  {"fa726te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a5",		ARM_ARCH_V7A_MP_SEC,
					 FPU_NONE,	  "Cortex-A5"},
  {"cortex-a8",		ARM_ARCH_V7A_SEC,
					 ARM_FEATURE (0, FPU_VFP_V3
							| FPU_NEON_EXT_V1),
							  "Cortex-A8"},
  {"cortex-a9",		ARM_ARCH_V7A_MP_SEC,
					 ARM_FEATURE (0, FPU_VFP_V3
							| FPU_NEON_EXT_V1),
							  "Cortex-A9"},
  {"cortex-a15",	ARM_ARCH_V7A_IDIV_MP_SEC,
					 FPU_ARCH_NEON_VFP_V4,
							  "Cortex-A15"},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"},
  {"cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
							  "Cortex-R4F"},
  {"cortex-m4",		ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"},
  {"cortex-m1",		ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"},
  {"cortex-m0",		ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
22453
/* Command-line -march=/.arch table entry: an architecture name, its
   feature set, and the FPU assumed when no -mfpu= is given.  */
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};
22460
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The leading "all" entry must stay first:
   s_arm_arch and s_arm_object_arch start their search at arm_archs + 1.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m",		ARM_ARCH_V6M,	 FPU_ARCH_VFP},
  {"armv6s-m",		ARM_ARCH_V6SM,	 FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7e-m",		ARM_ARCH_V7EM,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
22508
/* ISA extensions in the co-processor and main instruction set space.
   ALLOWED_ARCHS restricts which base architectures may enable the
   extension (checked in arm_parse_extension / s_arm_arch_extension).  */
struct arm_option_extension_value_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set allowed_archs;
};
22516
/* The following table must be in alphabetical order with a NULL last entry:
   arm_parse_extension scans it linearly and relies on the ordering both to
   enforce "extensions added before removed" and to diagnose out-of-order
   extension lists.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  {"idiv",	ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
				   ARM_FEATURE (ARM_EXT_V7A, 0)},
  {"iwmmxt",	ARM_FEATURE (0, ARM_CEXT_IWMMXT),	ARM_ANY},
  {"iwmmxt2",	ARM_FEATURE (0, ARM_CEXT_IWMMXT2),	ARM_ANY},
  {"maverick",	ARM_FEATURE (0, ARM_CEXT_MAVERICK),	ARM_ANY},
  {"mp",	ARM_FEATURE (ARM_EXT_MP, 0),
		ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)},
  {"os",	ARM_FEATURE (ARM_EXT_OS, 0),
		ARM_FEATURE (ARM_EXT_V6M, 0)},
  {"sec",	ARM_FEATURE (ARM_EXT_SEC, 0),
		ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)},
  {"xscale",	ARM_FEATURE (0, ARM_CEXT_XSCALE),	ARM_ANY},
  {NULL,	ARM_ARCH_NONE,			  ARM_ARCH_NONE}
};
22535
/* ISA floating-point and Advanced SIMD extensions.  Maps an -mfpu=/.fpu
   name onto the feature set it enables.  */
struct arm_option_fpu_value_table
{
  char *name;
  const arm_feature_set value;
};
22542
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact name in arm_parse_fpu and
   s_arm_fpu.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {NULL,		ARM_ARCH_NONE}
};
22583
/* Generic name/value pair used for the simple option tables below
   (float ABI, EABI version).  */
struct arm_option_value_table
{
  char *name;
  long value;
};
22589
/* Acceptable arguments to -mfloat-abi= (see arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
22597
22598 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Consumed by arm_parse_eabi for the -meabi= option.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
22607 #endif
22608
/* Description of one long (multi-character) option: the option text is
   matched as a prefix and the rest of the argument is handed to FUNC.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
22616
22617 static bfd_boolean
22618 arm_parse_extension (char * str, const arm_feature_set **opt_p)
22619 {
22620 arm_feature_set *ext_set = (arm_feature_set *)
22621 xmalloc (sizeof (arm_feature_set));
22622
22623 /* We insist on extensions being specified in alphabetical order, and with
22624 extensions being added before being removed. We achieve this by having
22625 the global ARM_EXTENSIONS table in alphabetical order, and using the
22626 ADDING_VALUE variable to indicate whether we are adding an extension (1)
22627 or removing it (0) and only allowing it to change in the order
22628 -1 -> 1 -> 0. */
22629 const struct arm_option_extension_value_table * opt = NULL;
22630 int adding_value = -1;
22631
22632 /* Copy the feature set, so that we can modify it. */
22633 *ext_set = **opt_p;
22634 *opt_p = ext_set;
22635
22636 while (str != NULL && *str != 0)
22637 {
22638 char * ext;
22639 size_t optlen;
22640
22641 if (*str != '+')
22642 {
22643 as_bad (_("invalid architectural extension"));
22644 return FALSE;
22645 }
22646
22647 str++;
22648 ext = strchr (str, '+');
22649
22650 if (ext != NULL)
22651 optlen = ext - str;
22652 else
22653 optlen = strlen (str);
22654
22655 if (optlen >= 2
22656 && strncmp (str, "no", 2) == 0)
22657 {
22658 if (adding_value != 0)
22659 {
22660 adding_value = 0;
22661 opt = arm_extensions;
22662 }
22663
22664 optlen -= 2;
22665 str += 2;
22666 }
22667 else if (optlen > 0)
22668 {
22669 if (adding_value == -1)
22670 {
22671 adding_value = 1;
22672 opt = arm_extensions;
22673 }
22674 else if (adding_value != 1)
22675 {
22676 as_bad (_("must specify extensions to add before specifying "
22677 "those to remove"));
22678 return FALSE;
22679 }
22680 }
22681
22682 if (optlen == 0)
22683 {
22684 as_bad (_("missing architectural extension"));
22685 return FALSE;
22686 }
22687
22688 gas_assert (adding_value != -1);
22689 gas_assert (opt != NULL);
22690
22691 /* Scan over the options table trying to find an exact match. */
22692 for (; opt->name != NULL; opt++)
22693 if (strncmp (opt->name, str, optlen) == 0
22694 && strlen (opt->name) == optlen)
22695 {
22696 /* Check we can apply the extension to this architecture. */
22697 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
22698 {
22699 as_bad (_("extension does not apply to the base architecture"));
22700 return FALSE;
22701 }
22702
22703 /* Add or remove the extension. */
22704 if (adding_value)
22705 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
22706 else
22707 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
22708
22709 break;
22710 }
22711
22712 if (opt->name == NULL)
22713 {
22714 /* Did we fail to find an extension because it wasn't specified in
22715 alphabetical order, or because it does not exist? */
22716
22717 for (opt = arm_extensions; opt->name != NULL; opt++)
22718 if (strncmp (opt->name, str, optlen) == 0)
22719 break;
22720
22721 if (opt->name == NULL)
22722 as_bad (_("unknown architectural extension `%s'"), str);
22723 else
22724 as_bad (_("architectural extensions must be specified in "
22725 "alphabetical order"));
22726
22727 return FALSE;
22728 }
22729 else
22730 {
22731 /* We should skip the extension we've just matched the next time
22732 round. */
22733 opt++;
22734 }
22735
22736 str = ext;
22737 };
22738
22739 return TRUE;
22740 }
22741
22742 static bfd_boolean
22743 arm_parse_cpu (char * str)
22744 {
22745 const struct arm_cpu_option_table * opt;
22746 char * ext = strchr (str, '+');
22747 int optlen;
22748
22749 if (ext != NULL)
22750 optlen = ext - str;
22751 else
22752 optlen = strlen (str);
22753
22754 if (optlen == 0)
22755 {
22756 as_bad (_("missing cpu name `%s'"), str);
22757 return FALSE;
22758 }
22759
22760 for (opt = arm_cpus; opt->name != NULL; opt++)
22761 if (strncmp (opt->name, str, optlen) == 0)
22762 {
22763 mcpu_cpu_opt = &opt->value;
22764 mcpu_fpu_opt = &opt->default_fpu;
22765 if (opt->canonical_name)
22766 strcpy (selected_cpu_name, opt->canonical_name);
22767 else
22768 {
22769 int i;
22770
22771 for (i = 0; i < optlen; i++)
22772 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22773 selected_cpu_name[i] = 0;
22774 }
22775
22776 if (ext != NULL)
22777 return arm_parse_extension (ext, &mcpu_cpu_opt);
22778
22779 return TRUE;
22780 }
22781
22782 as_bad (_("unknown cpu `%s'"), str);
22783 return FALSE;
22784 }
22785
22786 static bfd_boolean
22787 arm_parse_arch (char * str)
22788 {
22789 const struct arm_arch_option_table *opt;
22790 char *ext = strchr (str, '+');
22791 int optlen;
22792
22793 if (ext != NULL)
22794 optlen = ext - str;
22795 else
22796 optlen = strlen (str);
22797
22798 if (optlen == 0)
22799 {
22800 as_bad (_("missing architecture name `%s'"), str);
22801 return FALSE;
22802 }
22803
22804 for (opt = arm_archs; opt->name != NULL; opt++)
22805 if (strncmp (opt->name, str, optlen) == 0)
22806 {
22807 march_cpu_opt = &opt->value;
22808 march_fpu_opt = &opt->default_fpu;
22809 strcpy (selected_cpu_name, opt->name);
22810
22811 if (ext != NULL)
22812 return arm_parse_extension (ext, &march_cpu_opt);
22813
22814 return TRUE;
22815 }
22816
22817 as_bad (_("unknown architecture `%s'\n"), str);
22818 return FALSE;
22819 }
22820
22821 static bfd_boolean
22822 arm_parse_fpu (char * str)
22823 {
22824 const struct arm_option_fpu_value_table * opt;
22825
22826 for (opt = arm_fpus; opt->name != NULL; opt++)
22827 if (streq (opt->name, str))
22828 {
22829 mfpu_opt = &opt->value;
22830 return TRUE;
22831 }
22832
22833 as_bad (_("unknown floating point format `%s'\n"), str);
22834 return FALSE;
22835 }
22836
22837 static bfd_boolean
22838 arm_parse_float_abi (char * str)
22839 {
22840 const struct arm_option_value_table * opt;
22841
22842 for (opt = arm_float_abis; opt->name != NULL; opt++)
22843 if (streq (opt->name, str))
22844 {
22845 mfloat_abi_opt = opt->value;
22846 return TRUE;
22847 }
22848
22849 as_bad (_("unknown floating point abi `%s'\n"), str);
22850 return FALSE;
22851 }
22852
22853 #ifdef OBJ_ELF
22854 static bfd_boolean
22855 arm_parse_eabi (char * str)
22856 {
22857 const struct arm_option_value_table *opt;
22858
22859 for (opt = arm_eabis; opt->name != NULL; opt++)
22860 if (streq (opt->name, str))
22861 {
22862 meabi_flags = opt->value;
22863 return TRUE;
22864 }
22865 as_bad (_("unknown EABI `%s'\n"), str);
22866 return FALSE;
22867 }
22868 #endif
22869
22870 static bfd_boolean
22871 arm_parse_it_mode (char * str)
22872 {
22873 bfd_boolean ret = TRUE;
22874
22875 if (streq ("arm", str))
22876 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
22877 else if (streq ("thumb", str))
22878 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
22879 else if (streq ("always", str))
22880 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
22881 else if (streq ("never", str))
22882 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
22883 else
22884 {
22885 as_bad (_("unknown implicit IT mode `%s', should be "\
22886 "arm, thumb, always, or never."), str);
22887 ret = FALSE;
22888 }
22889
22890 return ret;
22891 }
22892
/* Long target options and their sub-option parsers; scanned by
   md_parse_option after the short and legacy option tables.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
22911
/* Handle a target-specific command-line option.  C is the option
   character and ARG its argument (possibly NULL).  Returns non-zero if
   the option was consumed, zero if it was not recognized.  Dispatch
   order: the fixed cases below, then arm_opts, then arm_legacy_opts,
   then the arm_long_opts prefix table.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple options: the whole option text must match.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: these store a pointer to their feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
23002
/* Print the ARM-specific command-line options to FP for --help.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Short options that carry a help string.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Long options such as -mcpu= / -march= / -mfpu=.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
23032
23033
23034 #ifdef OBJ_ELF
/* Pairing of an EABI Tag_CPU_arch value (VAL) with the feature set
   (FLAGS) characteristic of that architecture.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
23040
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first: aeabi_set_public_attributes scans it linearly and
   keeps the last entry that still contributes a new feature, which is why
   the VAL fields are deliberately not in numeric order.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
23062
23063 /* Set an attribute if it has not already been set by the user. */
23064 static void
23065 aeabi_set_attribute_int (int tag, int value)
23066 {
23067 if (tag < 1
23068 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23069 || !attributes_set_explicitly[tag])
23070 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
23071 }
23072
23073 static void
23074 aeabi_set_attribute_string (int tag, const char *value)
23075 {
23076 if (tag < 1
23077 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23078 || !attributes_set_explicitly[tag])
23079 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23080 }
23081
/* Set the public EABI object attributes (the .ARM.attributes section)
   from the features actually used plus any user-specified CPU/arch.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Walk cpu_arch_ver (sorted least features first) and remember the
     last entry that contributes a feature not yet accounted for.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Strip a leading "armv" and upper-case the remainder.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
				? 2 : 1));

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv))
    aeabi_set_attribute_int (Tag_DIV_use, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_div))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else
    aeabi_set_attribute_int (Tag_DIV_use, 1);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    aeabi_set_attribute_int (Tag_Virtualization_use, 1);
}
23219
23220 /* Add the default contents for the .ARM.attributes section. */
23221 void
23222 arm_md_end (void)
23223 {
23224 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23225 return;
23226
23227 aeabi_set_public_attributes ();
23228 }
23229 #endif /* OBJ_ELF */
23230
23231
23232 /* Parse a .cpu directive. */
23233
23234 static void
23235 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
23236 {
23237 const struct arm_cpu_option_table *opt;
23238 char *name;
23239 char saved_char;
23240
23241 name = input_line_pointer;
23242 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23243 input_line_pointer++;
23244 saved_char = *input_line_pointer;
23245 *input_line_pointer = 0;
23246
23247 /* Skip the first "all" entry. */
23248 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
23249 if (streq (opt->name, name))
23250 {
23251 mcpu_cpu_opt = &opt->value;
23252 selected_cpu = opt->value;
23253 if (opt->canonical_name)
23254 strcpy (selected_cpu_name, opt->canonical_name);
23255 else
23256 {
23257 int i;
23258 for (i = 0; opt->name[i]; i++)
23259 selected_cpu_name[i] = TOUPPER (opt->name[i]);
23260 selected_cpu_name[i] = 0;
23261 }
23262 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23263 *input_line_pointer = saved_char;
23264 demand_empty_rest_of_line ();
23265 return;
23266 }
23267 as_bad (_("unknown cpu `%s'"), name);
23268 *input_line_pointer = saved_char;
23269 ignore_rest_of_line ();
23270 }
23271
23272
23273 /* Parse a .arch directive. */
23274
23275 static void
23276 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
23277 {
23278 const struct arm_arch_option_table *opt;
23279 char saved_char;
23280 char *name;
23281
23282 name = input_line_pointer;
23283 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23284 input_line_pointer++;
23285 saved_char = *input_line_pointer;
23286 *input_line_pointer = 0;
23287
23288 /* Skip the first "all" entry. */
23289 for (opt = arm_archs + 1; opt->name != NULL; opt++)
23290 if (streq (opt->name, name))
23291 {
23292 mcpu_cpu_opt = &opt->value;
23293 selected_cpu = opt->value;
23294 strcpy (selected_cpu_name, opt->name);
23295 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23296 *input_line_pointer = saved_char;
23297 demand_empty_rest_of_line ();
23298 return;
23299 }
23300
23301 as_bad (_("unknown architecture `%s'\n"), name);
23302 *input_line_pointer = saved_char;
23303 ignore_rest_of_line ();
23304 }
23305
23306
23307 /* Parse a .object_arch directive. */
23308
23309 static void
23310 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
23311 {
23312 const struct arm_arch_option_table *opt;
23313 char saved_char;
23314 char *name;
23315
23316 name = input_line_pointer;
23317 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23318 input_line_pointer++;
23319 saved_char = *input_line_pointer;
23320 *input_line_pointer = 0;
23321
23322 /* Skip the first "all" entry. */
23323 for (opt = arm_archs + 1; opt->name != NULL; opt++)
23324 if (streq (opt->name, name))
23325 {
23326 object_arch = &opt->value;
23327 *input_line_pointer = saved_char;
23328 demand_empty_rest_of_line ();
23329 return;
23330 }
23331
23332 as_bad (_("unknown architecture `%s'\n"), name);
23333 *input_line_pointer = saved_char;
23334 ignore_rest_of_line ();
23335 }
23336
23337 /* Parse a .arch_extension directive. */
23338
23339 static void
23340 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
23341 {
23342 const struct arm_option_extension_value_table *opt;
23343 char saved_char;
23344 char *name;
23345 int adding_value = 1;
23346
23347 name = input_line_pointer;
23348 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23349 input_line_pointer++;
23350 saved_char = *input_line_pointer;
23351 *input_line_pointer = 0;
23352
23353 if (strlen (name) >= 2
23354 && strncmp (name, "no", 2) == 0)
23355 {
23356 adding_value = 0;
23357 name += 2;
23358 }
23359
23360 for (opt = arm_extensions; opt->name != NULL; opt++)
23361 if (streq (opt->name, name))
23362 {
23363 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
23364 {
23365 as_bad (_("architectural extension `%s' is not allowed for the "
23366 "current base architecture"), name);
23367 break;
23368 }
23369
23370 if (adding_value)
23371 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
23372 else
23373 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
23374
23375 mcpu_cpu_opt = &selected_cpu;
23376 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23377 *input_line_pointer = saved_char;
23378 demand_empty_rest_of_line ();
23379 return;
23380 }
23381
23382 if (opt->name == NULL)
23383 as_bad (_("unknown architecture `%s'\n"), name);
23384
23385 *input_line_pointer = saved_char;
23386 ignore_rest_of_line ();
23387 }
23388
23389 /* Parse a .fpu directive. */
23390
23391 static void
23392 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
23393 {
23394 const struct arm_option_fpu_value_table *opt;
23395 char saved_char;
23396 char *name;
23397
23398 name = input_line_pointer;
23399 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23400 input_line_pointer++;
23401 saved_char = *input_line_pointer;
23402 *input_line_pointer = 0;
23403
23404 for (opt = arm_fpus; opt->name != NULL; opt++)
23405 if (streq (opt->name, name))
23406 {
23407 mfpu_opt = &opt->value;
23408 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23409 *input_line_pointer = saved_char;
23410 demand_empty_rest_of_line ();
23411 return;
23412 }
23413
23414 as_bad (_("unknown floating point format `%s'\n"), name);
23415 *input_line_pointer = saved_char;
23416 ignore_rest_of_line ();
23417 }
23418
/* Copy symbol information.  Propagates the target-specific per-symbol
   flag from SRC to DEST (via the ARM_GET_FLAG accessor), so aliases and
   copied symbols keep the same annotation — presumably the ARM/Thumb
   marking; confirm against the ARM_GET_FLAG definition.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
23426
23427 #ifdef OBJ_ELF
23428 /* Given a symbolic attribute NAME, return the proper integer value.
23429 Returns -1 if the attribute is not known. */
23430
23431 int
23432 arm_convert_symbolic_attribute (const char *name)
23433 {
23434 static const struct
23435 {
23436 const char * name;
23437 const int tag;
23438 }
23439 attribute_table[] =
23440 {
23441 /* When you modify this table you should
23442 also modify the list in doc/c-arm.texi. */
23443 #define T(tag) {#tag, tag}
23444 T (Tag_CPU_raw_name),
23445 T (Tag_CPU_name),
23446 T (Tag_CPU_arch),
23447 T (Tag_CPU_arch_profile),
23448 T (Tag_ARM_ISA_use),
23449 T (Tag_THUMB_ISA_use),
23450 T (Tag_FP_arch),
23451 T (Tag_VFP_arch),
23452 T (Tag_WMMX_arch),
23453 T (Tag_Advanced_SIMD_arch),
23454 T (Tag_PCS_config),
23455 T (Tag_ABI_PCS_R9_use),
23456 T (Tag_ABI_PCS_RW_data),
23457 T (Tag_ABI_PCS_RO_data),
23458 T (Tag_ABI_PCS_GOT_use),
23459 T (Tag_ABI_PCS_wchar_t),
23460 T (Tag_ABI_FP_rounding),
23461 T (Tag_ABI_FP_denormal),
23462 T (Tag_ABI_FP_exceptions),
23463 T (Tag_ABI_FP_user_exceptions),
23464 T (Tag_ABI_FP_number_model),
23465 T (Tag_ABI_align_needed),
23466 T (Tag_ABI_align8_needed),
23467 T (Tag_ABI_align_preserved),
23468 T (Tag_ABI_align8_preserved),
23469 T (Tag_ABI_enum_size),
23470 T (Tag_ABI_HardFP_use),
23471 T (Tag_ABI_VFP_args),
23472 T (Tag_ABI_WMMX_args),
23473 T (Tag_ABI_optimization_goals),
23474 T (Tag_ABI_FP_optimization_goals),
23475 T (Tag_compatibility),
23476 T (Tag_CPU_unaligned_access),
23477 T (Tag_FP_HP_extension),
23478 T (Tag_VFP_HP_extension),
23479 T (Tag_ABI_FP_16bit_format),
23480 T (Tag_MPextension_use),
23481 T (Tag_DIV_use),
23482 T (Tag_nodefaults),
23483 T (Tag_also_compatible_with),
23484 T (Tag_conformance),
23485 T (Tag_T2EE_use),
23486 T (Tag_Virtualization_use),
23487 /* We deliberately do not include Tag_MPextension_use_legacy. */
23488 #undef T
23489 };
23490 unsigned int i;
23491
23492 if (name == NULL)
23493 return -1;
23494
23495 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
23496 if (streq (name, attribute_table[i].name))
23497 return attribute_table[i].tag;
23498
23499 return -1;
23500 }
23501
23502
23503 /* Apply sym value for relocations only in the case that
23504 they are for local symbols and you have the respective
23505 architectural feature for blx and simple switches. */
23506 int
23507 arm_apply_sym_value (struct fix * fixP)
23508 {
23509 if (fixP->fx_addsy
23510 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23511 && !S_IS_EXTERNAL (fixP->fx_addsy))
23512 {
23513 switch (fixP->fx_r_type)
23514 {
23515 case BFD_RELOC_ARM_PCREL_BLX:
23516 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23517 if (ARM_IS_FUNC (fixP->fx_addsy))
23518 return 1;
23519 break;
23520
23521 case BFD_RELOC_ARM_PCREL_CALL:
23522 case BFD_RELOC_THUMB_PCREL_BLX:
23523 if (THUMB_IS_FUNC (fixP->fx_addsy))
23524 return 1;
23525 break;
23526
23527 default:
23528 break;
23529 }
23530
23531 }
23532 return 0;
23533 }
23534 #endif /* OBJ_ELF */
This page took 0.775617 seconds and 4 git commands to generate.