/* Extracted from the binutils-gdb repository (gitweb view of
   commit e0eafbd874c4f210db17a87f0570c5dcf204e020),
   path: gas/config/tc-arm.c.  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
static struct
{
  /* Symbol marking the start of the function being unwound
     (presumably set by the .fnstart directive handler -- not visible
     in this chunk).  */
  symbolS * proc_start;
  /* Symbol for this function's unwind table entry, if one exists.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one has been named explicitly.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine (__aeabi_unwind_cpp_prN);
     see marked_pr_dependency below.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
typedef enum
{
  PARSE_OPERAND_SUCCESS,		/* Operand parsed successfully.  */
  PARSE_OPERAND_FAIL,			/* Parse failed.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK	/* Parse failed; caller must not
					   retry an alternative parse.  */
} parse_operand_result;

/* Float ABI variants (cf. mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
/* Float ABI selected on the command line; -1 until an option sets it
   (values from enum arm_float_abi above).  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];
#ifdef OBJ_ELF
# ifdef EABI_DEFAULT
/* ELF header EABI flags; the target may pre-select a version.  */
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif
235
236 bfd_boolean
237 arm_is_eabi(void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
276
277 static bfd_boolean unified_syntax = FALSE;
278
/* Kinds of element a Neon type specifier (e.g. ".s32") can name.  */
enum neon_el_type
{
  NT_invtype,	/* No/invalid type parsed.  */
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed element type: its kind plus its size (e.g. 32 for ".i32"
   -- see parse_neon_type below).  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of element types in one specifier, e.g. ".i32.i32.s16".  */
#define NEON_MAX_TYPE_ELS 4

/* A complete parsed Neon type specifier.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  /* Number of entries of EL actually in use.  */
  unsigned elems;
};
303
304 struct arm_it
305 {
306 const char * error;
307 unsigned long instruction;
308 int size;
309 int size_req;
310 int cond;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
313 appropriate. */
314 int uncond_value;
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
318 unsigned long relax;
319 struct
320 {
321 bfd_reloc_code_real_type type;
322 expressionS exp;
323 int pc_rel;
324 } reloc;
325
326 struct
327 {
328 unsigned reg;
329 signed int imm;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
337 instructions. This allows us to disambiguate ARM <-> vector insns. */
338 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
339 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
340 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
341 unsigned issingle : 1; /* Operand is VFP single-precision register. */
342 unsigned hasreloc : 1; /* Operand has relocation suffix. */
343 unsigned writeback : 1; /* Operand has trailing ! */
344 unsigned preind : 1; /* Preindexed address. */
345 unsigned postind : 1; /* Postindexed address. */
346 unsigned negative : 1; /* Index register was negated. */
347 unsigned shifted : 1; /* Shift applied to operation. */
348 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
349 } operands[6];
350 };
351
352 static struct arm_it inst;
353
354 #define NUM_FLOAT_VALS 8
355
356 const char * fp_const[] =
357 {
358 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
359 };
360
361 /* Number of littlenums required to hold an extended precision number. */
362 #define MAX_LITTLENUMS 6
363
364 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
365
366 #define FAIL (-1)
367 #define SUCCESS (0)
368
369 #define SUFF_S 1
370 #define SUFF_D 2
371 #define SUFF_E 3
372 #define SUFF_P 4
373
374 #define CP_T_X 0x00008000
375 #define CP_T_Y 0x00400000
376
377 #define CONDS_BIT 0x00100000
378 #define LOAD_BIT 0x00100000
379
380 #define DOUBLE_LOAD_FLAG 0x00000001
381
382 struct asm_cond
383 {
384 const char * template;
385 unsigned long value;
386 };
387
388 #define COND_ALWAYS 0xE
389
390 struct asm_psr
391 {
392 const char *template;
393 unsigned long field;
394 };
395
396 struct asm_barrier_opt
397 {
398 const char *template;
399 unsigned long value;
400 };
401
402 /* The bit that distinguishes CPSR and SPSR. */
403 #define SPSR_BIT (1 << 22)
404
405 /* The individual PSR flag bits. */
406 #define PSR_c (1 << 16)
407 #define PSR_x (1 << 17)
408 #define PSR_s (1 << 18)
409 #define PSR_f (1 << 19)
410
411 struct reloc_entry
412 {
413 char *name;
414 bfd_reloc_code_real_type reloc;
415 };
416
417 enum vfp_reg_pos
418 {
419 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
420 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
421 };
422
423 enum vfp_ldstm_type
424 {
425 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
426 };
427
428 /* Bits for DEFINED field in neon_typed_alias. */
429 #define NTA_HASTYPE 1
430 #define NTA_HASINDEX 2
431
432 struct neon_typed_alias
433 {
434 unsigned char defined;
435 unsigned char index;
436 struct neon_type_el eltype;
437 };
438
439 /* ARM register categories. This includes coprocessor numbers and various
440 architecture extensions' registers. */
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Must be kept in the same order as
   the reg_expected_msgs diagnostic table that follows.
   (The trailing comma after the last enumerator has been removed: it is
   a C90 constraint violation and draws warnings from pedantic compilers.)  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE
};
465
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Encoded register number.  */
  unsigned char type;		/* An enum arm_reg_type value.  */
  unsigned char builtin;	/* Nonzero for predefined (non-alias) entries.  */
  struct neon_typed_alias *neon;
};

/* Diagnostics used when we don't get a register of the expected type.
   NOTE: entries must stay in one-to-one correspondence, and in the same
   order, with enum arm_reg_type above.  */
const char *const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
504
505 /* Some well known registers that we refer to directly elsewhere. */
506 #define REG_SP 13
507 #define REG_LR 14
508 #define REG_PC 15
509
510 /* ARM instructions take 4bytes in the object file, Thumb instructions
511 take 2: */
512 #define INSN_SIZE 4
513
514 struct asm_opcode
515 {
516 /* Basic string to match. */
517 const char *template;
518
519 /* Parameters to instruction. */
520 unsigned char operands[8];
521
522 /* Conditional tag - see opcode_lookup. */
523 unsigned int tag : 4;
524
525 /* Basic instruction code. */
526 unsigned int avalue : 28;
527
528 /* Thumb-format instruction code. */
529 unsigned int tvalue;
530
531 /* Which architecture variant provides this instruction. */
532 const arm_feature_set *avariant;
533 const arm_feature_set *tvariant;
534
535 /* Function to call to encode instruction in ARM format. */
536 void (* aencode) (void);
537
538 /* Function to call to encode instruction in Thumb format. */
539 void (* tencode) (void);
540 };
541
542 /* Defines for various bits that we will want to toggle. */
543 #define INST_IMMEDIATE 0x02000000
544 #define OFFSET_REG 0x02000000
545 #define HWOFFSET_IMM 0x00400000
546 #define SHIFT_BY_REG 0x00000010
547 #define PRE_INDEX 0x01000000
548 #define INDEX_UP 0x00800000
549 #define WRITE_BACK 0x00200000
550 #define LDM_TYPE_2_OR_3 0x00400000
551
552 #define LITERAL_MASK 0xf000f000
553 #define OPCODE_MASK 0xfe1fffff
554 #define V4_STR_BIT 0x00000020
555
556 #define DATA_OP_SHIFT 21
557
558 #define T2_OPCODE_MASK 0xfe1fffff
559 #define T2_DATA_OP_SHIFT 21
560
561 /* Codes to distinguish the arithmetic instructions. */
562 #define OPCODE_AND 0
563 #define OPCODE_EOR 1
564 #define OPCODE_SUB 2
565 #define OPCODE_RSB 3
566 #define OPCODE_ADD 4
567 #define OPCODE_ADC 5
568 #define OPCODE_SBC 6
569 #define OPCODE_RSC 7
570 #define OPCODE_TST 8
571 #define OPCODE_TEQ 9
572 #define OPCODE_CMP 10
573 #define OPCODE_CMN 11
574 #define OPCODE_ORR 12
575 #define OPCODE_MOV 13
576 #define OPCODE_BIC 14
577 #define OPCODE_MVN 15
578
579 #define T2_OPCODE_AND 0
580 #define T2_OPCODE_BIC 1
581 #define T2_OPCODE_ORR 2
582 #define T2_OPCODE_ORN 3
583 #define T2_OPCODE_EOR 4
584 #define T2_OPCODE_ADD 8
585 #define T2_OPCODE_ADC 10
586 #define T2_OPCODE_SBC 11
587 #define T2_OPCODE_SUB 13
588 #define T2_OPCODE_RSB 14
589
590 #define T_OPCODE_MUL 0x4340
591 #define T_OPCODE_TST 0x4200
592 #define T_OPCODE_CMN 0x42c0
593 #define T_OPCODE_NEG 0x4240
594 #define T_OPCODE_MVN 0x43c0
595
596 #define T_OPCODE_ADD_R3 0x1800
597 #define T_OPCODE_SUB_R3 0x1a00
598 #define T_OPCODE_ADD_HI 0x4400
599 #define T_OPCODE_ADD_ST 0xb000
600 #define T_OPCODE_SUB_ST 0xb080
601 #define T_OPCODE_ADD_SP 0xa800
602 #define T_OPCODE_ADD_PC 0xa000
603 #define T_OPCODE_ADD_I8 0x3000
604 #define T_OPCODE_SUB_I8 0x3800
605 #define T_OPCODE_ADD_I3 0x1c00
606 #define T_OPCODE_SUB_I3 0x1e00
607
608 #define T_OPCODE_ASR_R 0x4100
609 #define T_OPCODE_LSL_R 0x4080
610 #define T_OPCODE_LSR_R 0x40c0
611 #define T_OPCODE_ROR_R 0x41c0
612 #define T_OPCODE_ASR_I 0x1000
613 #define T_OPCODE_LSL_I 0x0000
614 #define T_OPCODE_LSR_I 0x0800
615
616 #define T_OPCODE_MOV_I8 0x2000
617 #define T_OPCODE_CMP_I8 0x2800
618 #define T_OPCODE_CMP_LR 0x4280
619 #define T_OPCODE_MOV_HR 0x4600
620 #define T_OPCODE_CMP_HR 0x4500
621
622 #define T_OPCODE_LDR_PC 0x4800
623 #define T_OPCODE_LDR_SP 0x9800
624 #define T_OPCODE_STR_SP 0x9000
625 #define T_OPCODE_LDR_IW 0x6800
626 #define T_OPCODE_STR_IW 0x6000
627 #define T_OPCODE_LDR_IH 0x8800
628 #define T_OPCODE_STR_IH 0x8000
629 #define T_OPCODE_LDR_IB 0x7800
630 #define T_OPCODE_STR_IB 0x7000
631 #define T_OPCODE_LDR_RW 0x5800
632 #define T_OPCODE_STR_RW 0x5000
633 #define T_OPCODE_LDR_RH 0x5a00
634 #define T_OPCODE_STR_RH 0x5200
635 #define T_OPCODE_LDR_RB 0x5c00
636 #define T_OPCODE_STR_RB 0x5400
637
638 #define T_OPCODE_PUSH 0xb400
639 #define T_OPCODE_POP 0xbc00
640
641 #define T_OPCODE_BRANCH 0xe000
642
643 #define THUMB_SIZE 2 /* Size of thumb instruction. */
644 #define THUMB_PP_PC_LR 0x0100
645 #define THUMB_LOAD_BIT 0x0800
646 #define THUMB2_LOAD_BIT 0x00100000
647
/* Shared diagnostic strings for the encoders.  These macros expand to an
   expression (no trailing semicolon!) so they can be used safely as
   `inst.error = BAD_XXX;' inside if/else chains.  The stray semicolon
   previously on BAD_ADDR_MODE made such a use before `else' a syntax
   error.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
658
/* Name-keyed hash tables consulted during parsing (populated elsewhere in
   this file; arm_reg_hsh, for instance, is looked up by
   arm_reg_parse_multi below).  */
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
667
668 /* Stuff needed to resolve the label ambiguity
669 As:
670 ...
671 label: <insn>
672 may differ from:
673 ...
674 label:
675 <insn>
676 */
677
678 symbolS * last_label_seen;
679 static int label_is_thumb_function_name = FALSE;
680 \f
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants awaiting emission.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Number of entries of LITERALS in use.  */
  unsigned int next_free_entry;
  /* Identifier distinguishing this pool from the others in the list.  */
  unsigned int id;
  /* Symbol placed at the pool's emission point.  */
  symbolS * symbol;
  /* Section/subsection this pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* Next pool in LIST_OF_POOLS.  */
  struct literal_pool * next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
698
/* State variables for IT block handling.  */
/* NOTE(review): this was declared bfd_boolean, but the name says "mask",
   so a plain int is the honest type (and matches the companion
   current_cc).  Assuming bfd_boolean is a typedef for int, object code
   is unchanged -- confirm against bfd.h.  */
static int current_it_mask = 0;
static int current_cc;
702
703 \f
704 /* Pure syntax. */
705
706 /* This array holds the chars that always start a comment. If the
707 pre-processor is disabled, these aren't very useful. */
708 const char comment_chars[] = "@";
709
710 /* This array holds the chars that only start a comment at the beginning of
711 a line. If the line seems to have the form '# 123 filename'
712 .line and .file directives will appear in the pre-processed output. */
713 /* Note that input_file.c hand checks for '#' at the beginning of the
714 first line of the input file. This is because the compiler outputs
715 #NO_APP at the beginning of its output. */
716 /* Also note that comments like this one will always work. */
717 const char line_comment_chars[] = "#";
718
719 const char line_separator_chars[] = ";";
720
721 /* Chars that can be used to separate mant
722 from exp in floating point numbers. */
723 const char EXP_CHARS[] = "eE";
724
725 /* Chars that mean this number is a floating point constant. */
726 /* As in 0f12.456 */
727 /* or 0d1.2345e12 */
728
729 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
730
731 /* Prefix characters that indicate the start of an immediate
732 value. */
733 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
734
735 /* Separator character handling. */
736
737 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
738
739 static inline int
740 skip_past_char (char ** str, char c)
741 {
742 if (**str == c)
743 {
744 (*str)++;
745 return SUCCESS;
746 }
747 else
748 return FAIL;
749 }
750 #define skip_past_comma(str) skip_past_char (str, ',')
751
752 /* Arithmetic expressions (possibly involving symbols). */
753
754 /* Return TRUE if anything in the expression is a bignum. */
755
756 static int
757 walk_no_bignums (symbolS * sp)
758 {
759 if (symbol_get_value_expression (sp)->X_op == O_big)
760 return 1;
761
762 if (symbol_get_value_expression (sp)->X_add_symbol)
763 {
764 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
765 || (symbol_get_value_expression (sp)->X_op_symbol
766 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
767 }
768
769 return 0;
770 }
771
/* Nonzero while my_get_expression is executing; tested by md_operand so
   that bad sub-expressions are flagged O_illegal rather than reported
   out of context.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
781
/* Parse an expression at *STR into EP, honoring PREFIX_MODE (one of the
   GE_* values above: whether a '#'/'$' immediate prefix is required,
   optional, or absent, and whether bignums are tolerated).  On success
   *STR is advanced past the expression and 0 is returned.  On failure
   inst.error is set, *STR points at the offending text, and a nonzero
   value is returned.
   NOTE(review): the error paths mix `return FAIL' (-1) and `return 1';
   callers must treat any nonzero result as failure -- confirm and
   consider unifying.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily redirect it at our string; in_my_get_expression lets
     md_operand know to flag bad sub-expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

#ifdef OBJ_AOUT
  /* a.out cannot represent relocations against arbitrary segments.
     (SEG is otherwise unused -- expect a set-but-unused warning on
     non-a.out builds.)  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
864
865 /* Turn a string in input_line_pointer into a floating point constant
866 of type TYPE, and store the appropriate bytes in *LITP. The number
867 of LITTLENUMS emitted is stored in *SIZEP. An error message is
868 returned, or NULL on OK.
869
870 Note that fp constants aren't represent in the normal way on the ARM.
871 In big endian mode, things are as expected. However, in little endian
872 mode fp constants are big-endian word-wise, and little-endian byte-wise
873 within the words. For example, (double) 1.1 in big endian mode is
874 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
875 the byte sequence 99 99 f1 3f 9a 99 99 99.
876
877 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
878
879 char *
880 md_atof (int type, char * litP, int * sizeP)
881 {
882 int prec;
883 LITTLENUM_TYPE words[MAX_LITTLENUMS];
884 char *t;
885 int i;
886
887 switch (type)
888 {
889 case 'f':
890 case 'F':
891 case 's':
892 case 'S':
893 prec = 2;
894 break;
895
896 case 'd':
897 case 'D':
898 case 'r':
899 case 'R':
900 prec = 4;
901 break;
902
903 case 'x':
904 case 'X':
905 prec = 6;
906 break;
907
908 case 'p':
909 case 'P':
910 prec = 6;
911 break;
912
913 default:
914 *sizeP = 0;
915 return _("bad call to MD_ATOF()");
916 }
917
918 t = atof_ieee (input_line_pointer, type, words);
919 if (t)
920 input_line_pointer = t;
921 *sizeP = prec * 2;
922
923 if (target_big_endian)
924 {
925 for (i = 0; i < prec; i++)
926 {
927 md_number_to_chars (litP, (valueT) words[i], 2);
928 litP += 2;
929 }
930 }
931 else
932 {
933 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
934 for (i = prec - 1; i >= 0; i--)
935 {
936 md_number_to_chars (litP, (valueT) words[i], 2);
937 litP += 2;
938 }
939 else
940 /* For a 4 byte float the order of elements in `words' is 1 0.
941 For an 8 byte float the order is 1 0 3 2. */
942 for (i = 0; i < prec; i += 2)
943 {
944 md_number_to_chars (litP, (valueT) words[i + 1], 2);
945 md_number_to_chars (litP + 2, (valueT) words[i], 2);
946 litP += 4;
947 }
948 }
949
950 return 0;
951 }
952
953 /* We handle all bad expressions here, so that we can report the faulty
954 instruction in the error message. */
955 void
956 md_operand (expressionS * expr)
957 {
958 if (in_my_get_expression)
959 expr->X_op = O_illegal;
960 }
961
962 /* Immediate values. */
963
964 /* Generic immediate-value read function for use in directives.
965 Accepts anything that 'expression' can fold to a constant.
966 *val receives the number. */
967 #ifdef OBJ_ELF
968 static int
969 immediate_for_directive (int *val)
970 {
971 expressionS exp;
972 exp.X_op = O_illegal;
973
974 if (is_immediate_prefix (*input_line_pointer))
975 {
976 input_line_pointer++;
977 expression (&exp);
978 }
979
980 if (exp.X_op != O_constant)
981 {
982 as_bad (_("expected #constant"));
983 ignore_rest_of_line ();
984 return FAIL;
985 }
986 *val = exp.X_add_number;
987 return SUCCESS;
988 }
989 #endif
990
991 /* Register parsing. */
992
993 /* Generic register parser. CCP points to what should be the
994 beginning of a register name. If it is indeed a valid register
995 name, advance CCP over it and return the reg_entry structure;
996 otherwise return NULL. Does not issue diagnostics. */
997
998 static struct reg_entry *
999 arm_reg_parse_multi (char **ccp)
1000 {
1001 char *start = *ccp;
1002 char *p;
1003 struct reg_entry *reg;
1004
1005 #ifdef REGISTER_PREFIX
1006 if (*start != REGISTER_PREFIX)
1007 return NULL;
1008 start++;
1009 #endif
1010 #ifdef OPTIONAL_REGISTER_PREFIX
1011 if (*start == OPTIONAL_REGISTER_PREFIX)
1012 start++;
1013 #endif
1014
1015 p = start;
1016 if (!ISALPHA (*p) || !is_name_beginner (*p))
1017 return NULL;
1018
1019 do
1020 p++;
1021 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1022
1023 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1024
1025 if (!reg)
1026 return NULL;
1027
1028 *ccp = p;
1029 return reg;
1030 }
1031
/* Accept the alternative spellings allowed for some register classes:
   REG (the entry found by the generic parser, may be NULL) was parsed
   from the text at START as type TYPE.  Return the register number the
   alternative syntax denotes, or FAIL.  On a bare-number match *CCP is
   advanced past the digits.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): no break here -- control falls through into the
	 MMXWC case below.  This matches upstream behavior; confirm it
	 is intentional before adding a break.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1069
1070 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1071 return value is the register number or FAIL. */
1072
1073 static int
1074 arm_reg_parse (char **ccp, enum arm_reg_type type)
1075 {
1076 char *start = *ccp;
1077 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1078 int ret;
1079
1080 /* Do not allow a scalar (reg+index) to parse as a register. */
1081 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1082 return FAIL;
1083
1084 if (reg && reg->type == type)
1085 return reg->number;
1086
1087 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1088 return ret;
1089
1090 *ccp = start;
1091 return FAIL;
1092 }
1093
1094 /* Parse a Neon type specifier. *STR should point at the leading '.'
1095 character. Does no verification at this stage that the type fits the opcode
1096 properly. E.g.,
1097
1098 .i32.i32.s16
1099 .s32.f32
1100 .u16
1101
1102 Can all be legally parsed by this function.
1103
1104 Fills in neon_type struct pointer with parsed information, and updates STR
1105 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1106 type, FAIL if not. */
1107
1108 static int
1109 parse_neon_type (struct neon_type *type, char **str)
1110 {
1111 char *ptr = *str;
1112
1113 if (type)
1114 type->elems = 0;
1115
1116 while (type->elems < NEON_MAX_TYPE_ELS)
1117 {
1118 enum neon_el_type thistype = NT_untyped;
1119 unsigned thissize = -1u;
1120
1121 if (*ptr != '.')
1122 break;
1123
1124 ptr++;
1125
1126 /* Just a size without an explicit type. */
1127 if (ISDIGIT (*ptr))
1128 goto parsesize;
1129
1130 switch (TOLOWER (*ptr))
1131 {
1132 case 'i': thistype = NT_integer; break;
1133 case 'f': thistype = NT_float; break;
1134 case 'p': thistype = NT_poly; break;
1135 case 's': thistype = NT_signed; break;
1136 case 'u': thistype = NT_unsigned; break;
1137 case 'd':
1138 thistype = NT_float;
1139 thissize = 64;
1140 ptr++;
1141 goto done;
1142 default:
1143 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1144 return FAIL;
1145 }
1146
1147 ptr++;
1148
1149 /* .f is an abbreviation for .f32. */
1150 if (thistype == NT_float && !ISDIGIT (*ptr))
1151 thissize = 32;
1152 else
1153 {
1154 parsesize:
1155 thissize = strtoul (ptr, &ptr, 10);
1156
1157 if (thissize != 8 && thissize != 16 && thissize != 32
1158 && thissize != 64)
1159 {
1160 as_bad (_("bad size %d in type specifier"), thissize);
1161 return FAIL;
1162 }
1163 }
1164
1165 done:
1166 if (type)
1167 {
1168 type->el[type->elems].type = thistype;
1169 type->el[type->elems].size = thissize;
1170 type->elems++;
1171 }
1172 }
1173
1174 /* Empty/missing type is not a successful parse. */
1175 if (type->elems == 0)
1176 return FAIL;
1177
1178 *str = ptr;
1179
1180 return SUCCESS;
1181 }
1182
1183 /* Errors may be set multiple times during parsing or bit encoding
1184 (particularly in the Neon bits), but usually the earliest error which is set
1185 will be the most meaningful. Avoid overwriting it with later (cascading)
1186 errors by calling this function. */
1187
1188 static void
1189 first_error (const char *err)
1190 {
1191 if (!inst.error)
1192 inst.error = err;
1193 }
1194
1195 /* Parse a single type, e.g. ".s32", leading period included. */
1196 static int
1197 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1198 {
1199 char *str = *ccp;
1200 struct neon_type optype;
1201
1202 if (*str == '.')
1203 {
1204 if (parse_neon_type (&optype, &str) == SUCCESS)
1205 {
1206 if (optype.elems == 1)
1207 *vectype = optype.el[0];
1208 else
1209 {
1210 first_error (_("only one type should be specified for operand"));
1211 return FAIL;
1212 }
1213 }
1214 else
1215 {
1216 first_error (_("vector type expected"));
1217 return FAIL;
1218 }
1219 }
1220 else
1221 return FAIL;
1222
1223 *ccp = str;
1224
1225 return SUCCESS;
1226 }
1227
1228 /* Special meanings for indices (which have a range of 0-7), which will fit into
1229 a 4-bit integer. */
1230
1231 #define NEON_ALL_LANES 15
1232 #define NEON_INTERLEAVE_LANES 14
1233
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL on error.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty alias: no type, no index.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = reg->type;

  /* After narrowing, the parsed register must be exactly the type asked
     for.  */
  if (type != reg->type)
    return FAIL;

  /* A .dn/.qn alias may carry its own type and/or index; inherit it.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may only be given once per operand.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" selects all lanes, "[n]" a single lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1342
1343 /* Like arm_reg_parse, but allow allow the following extra features:
1344 - If RTYPE is non-zero, return the (possibly restricted) type of the
1345 register (e.g. Neon double or quad reg when either has been requested).
1346 - If this is a Neon vector type with additional type information, fill
1347 in the struct pointed to by VECTYPE (if non-NULL).
1348 This function will fault on encountering a scalar.
1349 */
1350
1351 static int
1352 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1353 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1354 {
1355 struct neon_typed_alias atype;
1356 char *str = *ccp;
1357 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1358
1359 if (reg == FAIL)
1360 return FAIL;
1361
1362 /* Do not allow a scalar (reg+index) to parse as a register. */
1363 if ((atype.defined & NTA_HASINDEX) != 0)
1364 {
1365 first_error (_("register operand expected, but got scalar"));
1366 return FAIL;
1367 }
1368
1369 if (vectype)
1370 *vectype = atype.eltype;
1371
1372 *ccp = str;
1373
1374 return reg;
1375 }
1376
1377 #define NEON_SCALAR_REG(X) ((X) >> 4)
1378 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1379
1380 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1381 have enough information to be able to do a good job bounds-checking. So, we
1382 just do easy checks here, and do further checks later. */
1383
1384 static int
1385 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1386 {
1387 int reg;
1388 char *str = *ccp;
1389 struct neon_typed_alias atype;
1390
1391 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1392
1393 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1394 return FAIL;
1395
1396 if (atype.index == NEON_ALL_LANES)
1397 {
1398 first_error (_("scalar must have an index"));
1399 return FAIL;
1400 }
1401 else if (atype.index >= 64 / elsize)
1402 {
1403 first_error (_("scalar index out of range"));
1404 return FAIL;
1405 }
1406
1407 if (type)
1408 *type = atype.eltype;
1409
1410 *ccp = str;
1411
1412 return reg * 16 + atype.index;
1413 }
1414
1415 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1416 static long
1417 parse_reg_list (char ** strp)
1418 {
1419 char * str = * strp;
1420 long range = 0;
1421 int another_range;
1422
1423 /* We come back here if we get ranges concatenated by '+' or '|'. */
1424 do
1425 {
1426 another_range = 0;
1427
1428 if (*str == '{')
1429 {
1430 int in_range = 0;
1431 int cur_reg = -1;
1432
1433 str++;
1434 do
1435 {
1436 int reg;
1437
1438 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1439 {
1440 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1441 return FAIL;
1442 }
1443
1444 if (in_range)
1445 {
1446 int i;
1447
1448 if (reg <= cur_reg)
1449 {
1450 first_error (_("bad range in register list"));
1451 return FAIL;
1452 }
1453
1454 for (i = cur_reg + 1; i < reg; i++)
1455 {
1456 if (range & (1 << i))
1457 as_tsktsk
1458 (_("Warning: duplicated register (r%d) in register list"),
1459 i);
1460 else
1461 range |= 1 << i;
1462 }
1463 in_range = 0;
1464 }
1465
1466 if (range & (1 << reg))
1467 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1468 reg);
1469 else if (reg <= cur_reg)
1470 as_tsktsk (_("Warning: register range not in ascending order"));
1471
1472 range |= 1 << reg;
1473 cur_reg = reg;
1474 }
1475 while (skip_past_comma (&str) != FAIL
1476 || (in_range = 1, *str++ == '-'));
1477 str--;
1478
1479 if (*str++ != '}')
1480 {
1481 first_error (_("missing `}'"));
1482 return FAIL;
1483 }
1484 }
1485 else
1486 {
1487 expressionS expr;
1488
1489 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1490 return FAIL;
1491
1492 if (expr.X_op == O_constant)
1493 {
1494 if (expr.X_add_number
1495 != (expr.X_add_number & 0x0000ffff))
1496 {
1497 inst.error = _("invalid register mask");
1498 return FAIL;
1499 }
1500
1501 if ((range & expr.X_add_number) != 0)
1502 {
1503 int regno = range & expr.X_add_number;
1504
1505 regno &= -regno;
1506 regno = (1 << regno) - 1;
1507 as_tsktsk
1508 (_("Warning: duplicated register (r%d) in register list"),
1509 regno);
1510 }
1511
1512 range |= expr.X_add_number;
1513 }
1514 else
1515 {
1516 if (inst.reloc.type != 0)
1517 {
1518 inst.error = _("expression too complex");
1519 return FAIL;
1520 }
1521
1522 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1523 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1524 inst.reloc.pc_rel = 0;
1525 }
1526 }
1527
1528 if (*str == '|' || *str == '+')
1529 {
1530 str++;
1531 another_range = 1;
1532 }
1533 }
1534 while (another_range);
1535
1536 *strp = str;
1537 return range;
1538 }
1539
/* Types of registers in a list. */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (maps to REG_TYPE_VFS).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers (REG_TYPE_VFD).  */
  REGLIST_NEON_D	/* Neon D/Q register lists (REG_TYPE_NDQ).  */
};
1548
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Select the register class and (for S registers) the list limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers. */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  max_regs = 32;
	  /* Record that the VFPv3 feature was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	max_regs = 16;
    }

  /* BASE_REG tracks the lowest register seen; start above the valid
     range so any parsed register replaces it.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>. */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> is stored as 2n; include both D halves of the high Q.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the closing '}' is skipped here without being checked;
     a list missing its '}' is not diagnosed at this point -- confirm a
     later check catches it.  */
  str++;

  /* Sanity check -- should have raised a parse error above. */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive. */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1729
1730 /* True if two alias types are the same. */
1731
1732 static int
1733 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1734 {
1735 if (!a && !b)
1736 return 1;
1737
1738 if (!a || !b)
1739 return 0;
1740
1741 if (a->defined != b->defined)
1742 return 0;
1743
1744 if ((a->defined & NTA_HASTYPE) != 0
1745 && (a->eltype.type != b->eltype.type
1746 || a->eltype.size != b->eltype.size))
1747 return 0;
1748
1749 if ((a->defined & NTA_HASINDEX) != 0
1750 && (a->index != b->index))
1751 return 0;
1752
1753 return 1;
1754 }
1755
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL. */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First register in the list.  */
  int reg_incr = -1;		/* Stride between registers; -1 = not yet known.  */
  int count = 0;		/* Number of D registers accumulated.  */
  int lane = -1;		/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;		/* D registers contributed per list element.  */
  /* NOTE(review): these literals are translated via _() at the point of
     use rather than marked with N_() here -- works, but nonstandard.  */
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element establishes the base register and, for Q
	     registers, the implied stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element establishes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent elements must follow the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* All elements must carry the same type/index information.  */
      if (!neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (!neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check. */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the header comment.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
1922
1923 /* Parse an explicit relocation suffix on an expression. This is
1924 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1925 arm_reloc_hsh contains no entries, so this function can only
1926 succeed if there is no () after the word. Returns -1 on error,
1927 BFD_RELOC_UNUSED if there wasn't any suffix. */
1928 static int
1929 parse_reloc (char **str)
1930 {
1931 struct reloc_entry *r;
1932 char *p, *q;
1933
1934 if (**str != '(')
1935 return BFD_RELOC_UNUSED;
1936
1937 p = *str + 1;
1938 q = p;
1939
1940 while (*q && *q != ')' && *q != ',')
1941 q++;
1942 if (*q != ')')
1943 return -1;
1944
1945 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1946 return -1;
1947
1948 *str = q + 1;
1949 return r->reloc;
1950 }
1951
1952 /* Directives: register aliases. */
1953
1954 static struct reg_entry *
1955 insert_reg_alias (char *str, int number, int type)
1956 {
1957 struct reg_entry *new;
1958 const char *name;
1959
1960 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1961 {
1962 if (new->builtin)
1963 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1964
1965 /* Only warn about a redefinition if it's not defined as the
1966 same register. */
1967 else if (new->number != number || new->type != type)
1968 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1969
1970 return 0;
1971 }
1972
1973 name = xstrdup (str);
1974 new = xmalloc (sizeof (struct reg_entry));
1975
1976 new->name = name;
1977 new->number = number;
1978 new->type = type;
1979 new->builtin = FALSE;
1980 new->neon = NULL;
1981
1982 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1983 abort ();
1984
1985 return new;
1986 }
1987
1988 static void
1989 insert_neon_reg_alias (char *str, int number, int type,
1990 struct neon_typed_alias *atype)
1991 {
1992 struct reg_entry *reg = insert_reg_alias (str, number, type);
1993
1994 if (!reg)
1995 {
1996 first_error (_("attempt to redefine typed alias"));
1997 return;
1998 }
1999
2000 if (atype)
2001 {
2002 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2003 *reg->neon = *atype;
2004 }
2005 }
2006
/* Look for the .req directive. This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return non-zero.  Otherwise return zero.  */

static int
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces. */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return 0;

  oldname += 6;
  if (*oldname == '\0')
    return 0;

  old = hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return 1;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end. If not, then
     the desired alias name is in the global original_case_string. */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NOTE(review): stack allocation with a length taken from the input
     line; fine for sane alias names, but an extremely long identifier
     could overflow the stack -- confirm upstream limits line length.  */
  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name. */
  insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOUPPER (*p);

  /* Only add the case-folded variants when they differ from the name as
     written.  */
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  return 1;
}
2071
/* Create a Neon typed/indexed register alias using directives, e.g.:
     X .dn d5.s32[1]
     Y .qn 6.s16
     Z .dn d7
     T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32
   Returns non-zero if the directive was recognized (even on error).  */

static int
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Which directive is this: .dn (D register) or .qn (Q register)?  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return 0;

  p += 5;

  if (*p == '\0')
    return 0;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return 0;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer. */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return 0;
	}
      basereg = &mybasereg;
      /* Q<n> is stored as 2 * n (cf. the note in parse_vfp_reg_list).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type. */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return 0;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index. */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return 0;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return 0;
	}
    }

  /* Copy the alias name into a NUL-terminated buffer.  */
  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase. */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase. */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return 1;
}
2213
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  (The real work is
   done by create_register_alias.)  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2221
/* Likewise for .dn: the directive is only valid between an alias name and
   a register specification (handled by create_neon_reg_alias).  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2227
/* Likewise for .qn: the directive is only valid between an alias name and
   a register specification (handled by create_neon_reg_alias).  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2233
2234 /* The .unreq directive deletes an alias which was previously defined
2235 by .req. For example:
2236
2237 my_alias .req r11
2238 .unreq my_alias */
2239
2240 static void
2241 s_unreq (int a ATTRIBUTE_UNUSED)
2242 {
2243 char * name;
2244 char saved_char;
2245
2246 name = input_line_pointer;
2247
2248 while (*input_line_pointer != 0
2249 && *input_line_pointer != ' '
2250 && *input_line_pointer != '\n')
2251 ++input_line_pointer;
2252
2253 saved_char = *input_line_pointer;
2254 *input_line_pointer = 0;
2255
2256 if (!*name)
2257 as_bad (_("invalid syntax for .unreq directive"));
2258 else
2259 {
2260 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2261
2262 if (!reg)
2263 as_bad (_("unknown register alias '%s'"), name);
2264 else if (reg->builtin)
2265 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2266 name);
2267 else
2268 {
2269 hash_delete (arm_reg_hsh, name);
2270 free ((char *) reg->name);
2271 if (reg->neon)
2272 free (reg->neon);
2273 free (reg);
2274 }
2275 }
2276
2277 *input_line_pointer = saved_char;
2278 demand_empty_rest_of_line ();
2279 }
2280
2281 /* Directives: Instruction set selection. */
2282
2283 #ifdef OBJ_ELF
2284 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2285 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2286 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2287 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2288
2289 static enum mstate mapstate = MAP_UNDEFINED;
2290
/* Emit an ELF mapping symbol ($a, $t or $d) at the current position in the
   current segment, unless one for STATE has already been emitted there.  */
void
mapping_state (enum mstate state)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do. */
    return;

  mapstate = state;

  /* Choose the symbol name for this state; all three are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    case MAP_UNDEFINED:
      return;
    default:
      abort ();
    }

  /* Remember the state per-segment so it survives section switches.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  /* Create the local mapping symbol at the current offset.  */
  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* For code states, also record the ARM/Thumb attributes on the symbol;
     $d needs no further decoration.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      return;
    }
}
2350 #else
2351 #define mapping_state(x) /* nothing */
2352 #endif
2353
2354 /* Find the real, Thumb encoded start of a Thumb function. */
2355
2356 static symbolS *
2357 find_real_start (symbolS * symbolP)
2358 {
2359 char * real_start;
2360 const char * name = S_GET_NAME (symbolP);
2361 symbolS * new_target;
2362
2363 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2364 #define STUB_NAME ".real_start_of"
2365
2366 if (name == NULL)
2367 abort ();
2368
2369 /* The compiler may generate BL instructions to local labels because
2370 it needs to perform a branch to a far away location. These labels
2371 do not have a corresponding ".real_start_of" label. We check
2372 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2373 the ".real_start_of" convention for nonlocal branches. */
2374 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2375 return symbolP;
2376
2377 real_start = ACONCAT ((STUB_NAME, name, NULL));
2378 new_target = symbol_find (real_start);
2379
2380 if (new_target == NULL)
2381 {
2382 as_warn ("Failed to find real start of function: %s\n", name);
2383 new_target = symbolP;
2384 }
2385
2386 return new_target;
2387 }
2388
2389 static void
2390 opcode_select (int width)
2391 {
2392 switch (width)
2393 {
2394 case 16:
2395 if (! thumb_mode)
2396 {
2397 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2398 as_bad (_("selected processor does not support THUMB opcodes"));
2399
2400 thumb_mode = 1;
2401 /* No need to force the alignment, since we will have been
2402 coming from ARM mode, which is word-aligned. */
2403 record_alignment (now_seg, 1);
2404 }
2405 mapping_state (MAP_THUMB);
2406 break;
2407
2408 case 32:
2409 if (thumb_mode)
2410 {
2411 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2412 as_bad (_("selected processor does not support ARM opcodes"));
2413
2414 thumb_mode = 0;
2415
2416 if (!need_pass_2)
2417 frag_align (2, 0, 0);
2418
2419 record_alignment (now_seg, 1);
2420 }
2421 mapping_state (MAP_ARM);
2422 break;
2423
2424 default:
2425 as_bad (_("invalid instruction size selected (%d)"), width);
2426 }
2427 }
2428
/* Implement the ".arm" directive: select 32-bit ARM encoding for
   subsequent instructions.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2435
/* Implement the ".thumb" directive: select 16-bit Thumb encoding for
   subsequent instructions.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2442
2443 static void
2444 s_code (int unused ATTRIBUTE_UNUSED)
2445 {
2446 int temp;
2447
2448 temp = get_absolute_expression ();
2449 switch (temp)
2450 {
2451 case 16:
2452 case 32:
2453 opcode_select (temp);
2454 break;
2455
2456 default:
2457 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2458 }
2459 }
2460
/* Implement the ".force_thumb" directive: switch to Thumb mode
   unconditionally, regardless of processor support.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* The value 2 (rather than 1) records that the mode was forced;
	 NOTE(review): assumption from the distinct value — confirm
	 against other readers of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2477
/* Implement the ".thumb_func" directive: switch to Thumb mode and flag
   the next label as the start of a Thumb function for interworking.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2487
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  If EQUIV is non-zero, behave like ".equiv":
   complain when the symbol is already defined (outside reg_section).  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Temporarily put the delimiter back so the comma check below sees
     the original input.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* NUL-terminate NAME just for the diagnostic, then restore.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter overwritten above.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2577
2578 /* Directives: Mode selection. */
2579
2580 /* .syntax [unified|divided] - choose the new unified syntax
2581 (same for Arm and Thumb encoding, modulo slight differences in what
2582 can be represented) or the old divergent syntax for each mode. */
2583 static void
2584 s_syntax (int unused ATTRIBUTE_UNUSED)
2585 {
2586 char *name, delim;
2587
2588 name = input_line_pointer;
2589 delim = get_symbol_end ();
2590
2591 if (!strcasecmp (name, "unified"))
2592 unified_syntax = TRUE;
2593 else if (!strcasecmp (name, "divided"))
2594 unified_syntax = FALSE;
2595 else
2596 {
2597 as_bad (_("unrecognized syntax mode \"%s\""), name);
2598 return;
2599 }
2600 *input_line_pointer = delim;
2601 demand_empty_rest_of_line ();
2602 }
2603
2604 /* Directives: sectioning and alignment. */
2605
2606 /* Same as s_align_ptwo but align 0 => align 2. */
2607
2608 static void
2609 s_align (int unused ATTRIBUTE_UNUSED)
2610 {
2611 int temp;
2612 long temp_fill;
2613 long max_alignment = 15;
2614
2615 temp = get_absolute_expression ();
2616 if (temp > max_alignment)
2617 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2618 else if (temp < 0)
2619 {
2620 as_bad (_("alignment negative. 0 assumed."));
2621 temp = 0;
2622 }
2623
2624 if (*input_line_pointer == ',')
2625 {
2626 input_line_pointer++;
2627 temp_fill = get_absolute_expression ();
2628 }
2629 else
2630 temp_fill = 0;
2631
2632 if (!temp)
2633 temp = 2;
2634
2635 /* Only make a frag if we HAVE to. */
2636 if (temp && !need_pass_2)
2637 frag_align (temp, (int) temp_fill, 0);
2638 demand_empty_rest_of_line ();
2639
2640 record_alignment (now_seg, temp);
2641 }
2642
/* Implement ".bss": switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* BSS contents are data as far as mapping symbols are concerned.  */
  mapping_state (MAP_DATA);
}
2652
/* Implement ".even": align to a two-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2664
2665 /* Directives: Literal pools. */
2666
2667 static literal_pool *
2668 find_literal_pool (void)
2669 {
2670 literal_pool * pool;
2671
2672 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2673 {
2674 if (pool->section == now_seg
2675 && pool->sub_section == now_subseg)
2676 break;
2677 }
2678
2679 return pool;
2680 }
2681
2682 static literal_pool *
2683 find_or_make_literal_pool (void)
2684 {
2685 /* Next literal pool ID number. */
2686 static unsigned int latest_pool_num = 1;
2687 literal_pool * pool;
2688
2689 pool = find_literal_pool ();
2690
2691 if (pool == NULL)
2692 {
2693 /* Create a new pool. */
2694 pool = xmalloc (sizeof (* pool));
2695 if (! pool)
2696 return NULL;
2697
2698 pool->next_free_entry = 0;
2699 pool->section = now_seg;
2700 pool->sub_section = now_subseg;
2701 pool->next = list_of_pools;
2702 pool->symbol = NULL;
2703
2704 /* Add it to the list. */
2705 list_of_pools = pool;
2706 }
2707
2708 /* New pools, and emptied pools, will have a NULL symbol. */
2709 if (pool->symbol == NULL)
2710 {
2711 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2712 (valueT) 0, &zero_address_frag);
2713 pool->id = latest_pool_num ++;
2714 }
2715
2716 /* Done. */
2717 return pool;
2718 }
2719
/* Add the literal in the global 'inst' structure to the relevant
   literal pool, reusing an existing identical entry when possible.
   On success, rewrites inst.reloc.exp to reference the pool's anchor
   symbol at the entry's byte offset.  Returns SUCCESS, or FAIL when
   the pool is full.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Constants match on value and signedness.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* Symbolic expressions match on both symbols and the addend.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Redirect the instruction's relocation at the pool entry; each
     entry is one word, hence the factor of 4.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
2772
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.
   Fill in SYMBOLP (which must already exist) with NAME, SEGMENT, value
   VALU and fragment FRAG, then link it onto the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns a stable copy.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2823
2824
/* Implement ".ltorg": dump the current section's literal pool at this
   point as word-sized data, then mark the pool as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    /* No pool, or nothing in it - nothing to dump.  */
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* NOTE(review): the \002 byte presumably keeps the generated name out
     of the user namespace - confirm against other fake-label naming.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's anchor symbol (created earlier with no location) a
     real name and address here.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
2867
2868 #ifdef OBJ_ELF
2869 /* Forward declarations for functions below, in the MD interface
2870 section. */
2871 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2872 static valueT create_unwind_entry (int);
2873 static void start_unwind_section (const segT, int);
2874 static void add_unwind_opcode (valueT, int);
2875 static void flush_pending_unwind (void);
2876
2877 /* Directives: Data. */
2878
/* Implement data directives (.word etc.) for ELF: like the generic
   cons, but each symbolic operand may carry a "(reloc)" suffix
   selecting an explicit relocation type.  NBYTES is the size in bytes
   of each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* We are emitting data - switch to the $d mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No "(reloc)" suffix - emit the expression normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = alloca (input_line_pointer - base);
		  /* Save the operand text, splice out the "(reloc)"
		     part so the whole operand re-parses as a single
		     expression, then restore the buffer afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Apply the fixup to the trailing SIZE bytes of the
		     NBYTES-wide field.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
2966
2967
2968 /* Parse a .rel31 directive. */
2969
2970 static void
2971 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2972 {
2973 expressionS exp;
2974 char *p;
2975 valueT highbit;
2976
2977 highbit = 0;
2978 if (*input_line_pointer == '1')
2979 highbit = 0x80000000;
2980 else if (*input_line_pointer != '0')
2981 as_bad (_("expected 0 or 1"));
2982
2983 input_line_pointer++;
2984 if (*input_line_pointer != ',')
2985 as_bad (_("missing comma"));
2986 input_line_pointer++;
2987
2988 #ifdef md_flush_pending_output
2989 md_flush_pending_output ();
2990 #endif
2991
2992 #ifdef md_cons_align
2993 md_cons_align (4);
2994 #endif
2995
2996 mapping_state (MAP_DATA);
2997
2998 expression (&exp);
2999
3000 p = frag_more (4);
3001 md_number_to_chars (p, highbit, 4);
3002 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3003 BFD_RELOC_ARM_PREL31);
3004
3005 demand_empty_rest_of_line ();
3006 }
3007
3008 /* Directives: AEABI stack-unwind tables. */
3009
3010 /* Parse an unwind_fnstart directive. Simply records the current location. */
3011
3012 static void
3013 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3014 {
3015 demand_empty_rest_of_line ();
3016 /* Mark the start of the function. */
3017 unwind.proc_start = expr_build_dot ();
3018
3019 /* Reset the rest of the unwind info. */
3020 unwind.opcode_count = 0;
3021 unwind.table_entry = NULL;
3022 unwind.personality_routine = NULL;
3023 unwind.personality_index = -1;
3024 unwind.frame_size = 0;
3025 unwind.fp_offset = 0;
3026 unwind.fp_reg = 13;
3027 unwind.fp_used = 0;
3028 unwind.sp_restored = 0;
3029 }
3030
3031
3032 /* Parse a handlerdata directive. Creates the exception handling table entry
3033 for the function. */
3034
3035 static void
3036 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3037 {
3038 demand_empty_rest_of_line ();
3039 if (unwind.table_entry)
3040 as_bad (_("dupicate .handlerdata directive"));
3041
3042 create_unwind_entry (1);
3043 }
3044
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section, plus either an inline
   unwind entry or a reference to the separately emitted table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;

  demand_empty_rest_of_line ();

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] = {
	"__aeabi_unwind_cpp_pr0",
	"__aeabi_unwind_cpp_pr1",
	"__aeabi_unwind_cpp_pr2"
      };
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-sized BFD_RELOC_NONE fixup records the reference without
	 patching any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
}
3102
3103
/* Parse an unwind_cantunwind directive.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 flags the frame as cantunwind.  NOTE(review): presumably
     translated to EXIDX_CANTUNWIND later - confirm in
     create_unwind_entry.  */
  unwind.personality_index = -2;
}
3115
3116
3117 /* Parse a personalityindex directive. */
3118
3119 static void
3120 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3121 {
3122 expressionS exp;
3123
3124 if (unwind.personality_routine || unwind.personality_index != -1)
3125 as_bad (_("duplicate .personalityindex directive"));
3126
3127 expression (&exp);
3128
3129 if (exp.X_op != O_constant
3130 || exp.X_add_number < 0 || exp.X_add_number > 15)
3131 {
3132 as_bad (_("bad personality routine number"));
3133 ignore_rest_of_line ();
3134 return;
3135 }
3136
3137 unwind.personality_index = exp.X_add_number;
3138
3139 demand_empty_rest_of_line ();
3140 }
3141
3142
/* Parse a personality directive: record the named symbol as this
   frame's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  /* Only one personality (routine or index) may be given per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the delimiter that get_symbol_end replaced with a NUL.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3160
3161
/* Parse a directive saving core registers.  Emits EHABI pop opcodes
   for the parsed register list and accounts for the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;		/* Bit mask of saved registers, bit N = rN.  */
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode emitted for the movsp and save sp instead.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3237
3238
3239 /* Parse a directive saving FPA registers. */
3240
3241 static void
3242 s_arm_unwind_save_fpa (int reg)
3243 {
3244 expressionS exp;
3245 int num_regs;
3246 valueT op;
3247
3248 /* Get Number of registers to transfer. */
3249 if (skip_past_comma (&input_line_pointer) != FAIL)
3250 expression (&exp);
3251 else
3252 exp.X_op = O_illegal;
3253
3254 if (exp.X_op != O_constant)
3255 {
3256 as_bad (_("expected , <constant>"));
3257 ignore_rest_of_line ();
3258 return;
3259 }
3260
3261 num_regs = exp.X_add_number;
3262
3263 if (num_regs < 1 || num_regs > 4)
3264 {
3265 as_bad (_("number of registers must be in the range [1:4]"));
3266 ignore_rest_of_line ();
3267 return;
3268 }
3269
3270 demand_empty_rest_of_line ();
3271
3272 if (reg == 4)
3273 {
3274 /* Short form. */
3275 op = 0xb4 | (num_regs - 1);
3276 add_unwind_opcode (op, 1);
3277 }
3278 else
3279 {
3280 /* Long form. */
3281 op = 0xc800 | (reg << 4) | (num_regs - 1);
3282 add_unwind_opcode (op, 2);
3283 }
3284 unwind.frame_size += num_regs * 12;
3285 }
3286
3287
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list is split at D16 because registers 16-31 (VFPv3) need a
   different opcode from registers 0-15.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Register numbers in the opcode are relative to D16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
3336
3337
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX/FLDMX
   style frame layout).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* 8 bytes per D register; NOTE(review): the extra + 4 presumably
     accounts for the FSTMX format word - confirm against the EHABI.  */
  unwind.frame_size += count * 8 + 4;
}
3371
3372
3373 /* Parse a directive saving iWMMXt data registers. */
3374
3375 static void
3376 s_arm_unwind_save_mmxwr (void)
3377 {
3378 int reg;
3379 int hi_reg;
3380 int i;
3381 unsigned mask = 0;
3382 valueT op;
3383
3384 if (*input_line_pointer == '{')
3385 input_line_pointer++;
3386
3387 do
3388 {
3389 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3390
3391 if (reg == FAIL)
3392 {
3393 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3394 goto error;
3395 }
3396
3397 if (mask >> reg)
3398 as_tsktsk (_("register list not in ascending order"));
3399 mask |= 1 << reg;
3400
3401 if (*input_line_pointer == '-')
3402 {
3403 input_line_pointer++;
3404 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3405 if (hi_reg == FAIL)
3406 {
3407 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3408 goto error;
3409 }
3410 else if (reg >= hi_reg)
3411 {
3412 as_bad (_("bad register range"));
3413 goto error;
3414 }
3415 for (; reg < hi_reg; reg++)
3416 mask |= 1 << reg;
3417 }
3418 }
3419 while (skip_past_comma (&input_line_pointer) != FAIL);
3420
3421 if (*input_line_pointer == '}')
3422 input_line_pointer++;
3423
3424 demand_empty_rest_of_line ();
3425
3426 /* Generate any deferred opcodes because we're going to be looking at
3427 the list. */
3428 flush_pending_unwind ();
3429
3430 for (i = 0; i < 16; i++)
3431 {
3432 if (mask & (1 << i))
3433 unwind.frame_size += 8;
3434 }
3435
3436 /* Attempt to combine with a previous opcode. We do this because gcc
3437 likes to output separate unwind directives for a single block of
3438 registers. */
3439 if (unwind.opcode_count > 0)
3440 {
3441 i = unwind.opcodes[unwind.opcode_count - 1];
3442 if ((i & 0xf8) == 0xc0)
3443 {
3444 i &= 7;
3445 /* Only merge if the blocks are contiguous. */
3446 if (i < 6)
3447 {
3448 if ((mask & 0xfe00) == (1 << 9))
3449 {
3450 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3451 unwind.opcode_count--;
3452 }
3453 }
3454 else if (i == 6 && unwind.opcode_count >= 2)
3455 {
3456 i = unwind.opcodes[unwind.opcode_count - 2];
3457 reg = i >> 4;
3458 i &= 0xf;
3459
3460 op = 0xffff << (reg - 1);
3461 if (reg > 0
3462 && ((mask & op) == (1u << (reg - 1))))
3463 {
3464 op = (1 << (reg + i + 1)) - 1;
3465 op &= ~((1 << reg) - 1);
3466 mask |= op;
3467 unwind.opcode_count -= 2;
3468 }
3469 }
3470 }
3471 }
3472
3473 hi_reg = 15;
3474 /* We want to generate opcodes in the order the registers have been
3475 saved, ie. descending order. */
3476 for (reg = 15; reg >= -1; reg--)
3477 {
3478 /* Save registers in blocks. */
3479 if (reg < 0
3480 || !(mask & (1 << reg)))
3481 {
3482 /* We found an unsaved reg. Generate opcodes to save the
3483 preceeding block. */
3484 if (reg != hi_reg)
3485 {
3486 if (reg == 9)
3487 {
3488 /* Short form. */
3489 op = 0xc0 | (hi_reg - 10);
3490 add_unwind_opcode (op, 1);
3491 }
3492 else
3493 {
3494 /* Long form. */
3495 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3496 add_unwind_opcode (op, 2);
3497 }
3498 }
3499 hi_reg = reg - 1;
3500 }
3501 }
3502
3503 return;
3504 error:
3505 ignore_rest_of_line ();
3506 }
3507
/* Parse a directive saving iWMMXt control registers (wcgr8-wcgr11);
   the opcode carries a 4-bit mask biased by wcgr8.  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Register numbers are encoded relative to wcgr8.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  /* NOTE(review): like the wR variant above, this never sets
	     the HI_REG bit itself (and HI_REG is not biased by 8) -
	     confirm before changing.  */
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved control register occupies 4 bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
3574
3575
3576 /* Parse an unwind_save directive.
3577 If the argument is non-zero, this is a .vsave directive. */
3578
3579 static void
3580 s_arm_unwind_save (int arch_v6)
3581 {
3582 char *peek;
3583 struct reg_entry *reg;
3584 bfd_boolean had_brace = FALSE;
3585
3586 /* Figure out what sort of save we have. */
3587 peek = input_line_pointer;
3588
3589 if (*peek == '{')
3590 {
3591 had_brace = TRUE;
3592 peek++;
3593 }
3594
3595 reg = arm_reg_parse_multi (&peek);
3596
3597 if (!reg)
3598 {
3599 as_bad (_("register expected"));
3600 ignore_rest_of_line ();
3601 return;
3602 }
3603
3604 switch (reg->type)
3605 {
3606 case REG_TYPE_FN:
3607 if (had_brace)
3608 {
3609 as_bad (_("FPA .unwind_save does not take a register list"));
3610 ignore_rest_of_line ();
3611 return;
3612 }
3613 s_arm_unwind_save_fpa (reg->number);
3614 return;
3615
3616 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3617 case REG_TYPE_VFD:
3618 if (arch_v6)
3619 s_arm_unwind_save_vfp_armv6 ();
3620 else
3621 s_arm_unwind_save_vfp ();
3622 return;
3623 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3624 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3625
3626 default:
3627 as_bad (_(".unwind_save does not support this kind of register"));
3628 ignore_rest_of_line ();
3629 }
3630 }
3631
3632
3633 /* Parse an unwind_movsp directive. */
3634
3635 static void
3636 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3637 {
3638 int reg;
3639 valueT op;
3640 int offset;
3641
3642 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3643 if (reg == FAIL)
3644 {
3645 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3646 ignore_rest_of_line ();
3647 return;
3648 }
3649
3650 /* Optional constant. */
3651 if (skip_past_comma (&input_line_pointer) != FAIL)
3652 {
3653 if (immediate_for_directive (&offset) == FAIL)
3654 return;
3655 }
3656 else
3657 offset = 0;
3658
3659 demand_empty_rest_of_line ();
3660
3661 if (reg == REG_SP || reg == REG_PC)
3662 {
3663 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3664 return;
3665 }
3666
3667 if (unwind.fp_reg != REG_SP)
3668 as_bad (_("unexpected .unwind_movsp directive"));
3669
3670 /* Generate opcode to restore the value. */
3671 op = 0x90 | reg;
3672 add_unwind_opcode (op, 1);
3673
3674 /* Record the information for later. */
3675 unwind.fp_reg = reg;
3676 unwind.fp_offset = unwind.frame_size - offset;
3677 unwind.sp_restored = 1;
3678 }
3679
3680 /* Parse an unwind_pad directive. */
3681
3682 static void
3683 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3684 {
3685 int offset;
3686
3687 if (immediate_for_directive (&offset) == FAIL)
3688 return;
3689
3690 if (offset & 3)
3691 {
3692 as_bad (_("stack increment must be multiple of 4"));
3693 ignore_rest_of_line ();
3694 return;
3695 }
3696
3697 /* Don't generate any opcodes, just record the details for later. */
3698 unwind.frame_size += offset;
3699 unwind.pending_offset += offset;
3700
3701 demand_empty_rest_of_line ();
3702 }
3703
3704 /* Parse an unwind_setfp directive. */
3705
3706 static void
3707 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3708 {
3709 int sp_reg;
3710 int fp_reg;
3711 int offset;
3712
3713 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3714 if (skip_past_comma (&input_line_pointer) == FAIL)
3715 sp_reg = FAIL;
3716 else
3717 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3718
3719 if (fp_reg == FAIL || sp_reg == FAIL)
3720 {
3721 as_bad (_("expected <reg>, <reg>"));
3722 ignore_rest_of_line ();
3723 return;
3724 }
3725
3726 /* Optional constant. */
3727 if (skip_past_comma (&input_line_pointer) != FAIL)
3728 {
3729 if (immediate_for_directive (&offset) == FAIL)
3730 return;
3731 }
3732 else
3733 offset = 0;
3734
3735 demand_empty_rest_of_line ();
3736
3737 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3738 {
3739 as_bad (_("register must be either sp or set by a previous"
3740 "unwind_movsp directive"));
3741 return;
3742 }
3743
3744 /* Don't generate any opcodes, just record the information for later. */
3745 unwind.fp_reg = fp_reg;
3746 unwind.fp_used = 1;
3747 if (sp_reg == 13)
3748 unwind.fp_offset = unwind.frame_size - offset;
3749 else
3750 unwind.fp_offset -= offset;
3751 }
3752
3753 /* Parse an unwind_raw directive. */
3754
3755 static void
3756 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3757 {
3758 expressionS exp;
3759 /* This is an arbitrary limit. */
3760 unsigned char op[16];
3761 int count;
3762
3763 expression (&exp);
3764 if (exp.X_op == O_constant
3765 && skip_past_comma (&input_line_pointer) != FAIL)
3766 {
3767 unwind.frame_size += exp.X_add_number;
3768 expression (&exp);
3769 }
3770 else
3771 exp.X_op = O_illegal;
3772
3773 if (exp.X_op != O_constant)
3774 {
3775 as_bad (_("expected <offset>, <opcode>"));
3776 ignore_rest_of_line ();
3777 return;
3778 }
3779
3780 count = 0;
3781
3782 /* Parse the opcode. */
3783 for (;;)
3784 {
3785 if (count >= 16)
3786 {
3787 as_bad (_("unwind opcode too long"));
3788 ignore_rest_of_line ();
3789 }
3790 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3791 {
3792 as_bad (_("invalid unwind opcode"));
3793 ignore_rest_of_line ();
3794 return;
3795 }
3796 op[count++] = exp.X_add_number;
3797
3798 /* Parse the next byte. */
3799 if (skip_past_comma (&input_line_pointer) == FAIL)
3800 break;
3801
3802 expression (&exp);
3803 }
3804
3805 /* Add the opcode bytes in reverse order. */
3806 while (count--)
3807 add_unwind_opcode (op[count], 1);
3808
3809 demand_empty_rest_of_line ();
3810 }
3811
3812
3813 /* Parse a .eabi_attribute directive. */
3814
static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  bfd_boolean is_string;
  int tag;
  unsigned int i = 0;
  char *s = NULL;
  char saved_char;

  /* Syntax: .eabi_attribute <tag>, <value>.  The tag must be a
     compile-time constant.  */
  expression (& exp);
  if (exp.X_op != O_constant)
    goto bad;

  tag = exp.X_add_number;
  /* Tags 4, 5 and 32, and all odd tags above 32, take a string value;
     everything else takes an integer.  */
  if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
    is_string = 1;
  else
    is_string = 0;

  if (skip_past_comma (&input_line_pointer) == FAIL)
    goto bad;
  /* Tag 32 (Tag_compatibility) takes an integer AND a string, so the
     integer is read here even though is_string is set for it.  */
  if (tag == 32 || !is_string)
    {
      expression (& exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expected numeric constant"));
	  ignore_rest_of_line ();
	  return;
	}
      i = exp.X_add_number;
    }
  if (tag == Tag_compatibility
      && skip_past_comma (&input_line_pointer) == FAIL)
    {
      as_bad (_("expected comma"));
      ignore_rest_of_line ();
      return;
    }
  if (is_string)
    {
      /* Scan a double-quoted string and NUL-terminate it in place;
	 the clobbered character is restored below.  */
      skip_whitespace(input_line_pointer);
      if (*input_line_pointer != '"')
	goto bad_string;
      input_line_pointer++;
      s = input_line_pointer;
      while (*input_line_pointer && *input_line_pointer != '"')
	input_line_pointer++;
      if (*input_line_pointer != '"')
	goto bad_string;
      /* saved_char is always '"' here, but keep the save/restore
	 pairing explicit.  */
      saved_char = *input_line_pointer;
      *input_line_pointer = 0;
    }
  else
    {
      s = NULL;
      saved_char = 0;
    }

  /* Hand the attribute to the BFD backend.  */
  if (tag == Tag_compatibility)
    elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
  else if (is_string)
    elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
  else
    elf32_arm_add_eabi_attr_int (stdoutput, tag, i);

  if (s)
    {
      /* Restore the character overwritten by the NUL terminator and
	 step past the closing quote.  */
      *input_line_pointer = saved_char;
      input_line_pointer++;
    }
  demand_empty_rest_of_line ();
  return;
bad_string:
  as_bad (_("bad string constant"));
  ignore_rest_of_line ();
  return;
bad:
  as_bad (_("expected <tag> , <value>"));
  ignore_rest_of_line ();
}
3897 #endif /* OBJ_ELF */
3898
3899 static void s_arm_arch (int);
3900 static void s_arm_object_arch (int);
3901 static void s_arm_cpu (int);
3902 static void s_arm_fpu (int);
3903
3904 #ifdef TE_PE
3905
3906 static void
3907 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3908 {
3909 expressionS exp;
3910
3911 do
3912 {
3913 expression (&exp);
3914 if (exp.X_op == O_symbol)
3915 exp.X_op = O_secrel;
3916
3917 emit_expr (&exp, 4);
3918 }
3919 while (*input_line_pointer++ == ',');
3920
3921 input_line_pointer--;
3922 demand_empty_rest_of_line ();
3923 }
3924 #endif /* TE_PE */
3925
3926 /* This table describes all the machine specific pseudo-ops the assembler
3927 has to support. The fields are:
3928 pseudo-op name without dot
3929 function to call to execute this pseudo-op
3930 Integer arg to pass to the function. */
3931
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align, 0 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  /* Target selection directives.  */
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "rel31", s_arm_rel31, 0 },
  /* Exception-table unwinding directives (see the s_arm_unwind_*
     handlers above).  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  /* The integer argument distinguishes .vsave (1) from .save (0).  */
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf. */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2. */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
3993 \f
3994 /* Parser functions used exclusively in instruction operands. */
3995
3996 /* Generic immediate-value read function for use in insn parsing.
3997 STR points to the beginning of the immediate (the leading #);
3998 VAL receives the value; if the value is outside [MIN, MAX]
3999 issue an error. PREFIX_OPT is true if the immediate prefix is
4000 optional. */
4001
4002 static int
4003 parse_immediate (char **str, int *val, int min, int max,
4004 bfd_boolean prefix_opt)
4005 {
4006 expressionS exp;
4007 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4008 if (exp.X_op != O_constant)
4009 {
4010 inst.error = _("constant expression required");
4011 return FAIL;
4012 }
4013
4014 if (exp.X_add_number < min || exp.X_add_number > max)
4015 {
4016 inst.error = _("immediate value out of range");
4017 return FAIL;
4018 }
4019
4020 *val = exp.X_add_number;
4021 return SUCCESS;
4022 }
4023
4024 /* Less-generic immediate-value read function with the possibility of loading a
4025 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4026 instructions. Puts the result directly in inst.operands[i]. */
4027
static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      /* Low 32 bits of the value go in .imm.  */
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant. We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* A bignum of 33..64 bits: low 32 go in .imm, high 32 in .reg.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    /* Anything else (non-constant, or a bignum outside 33..64 bits)
       cannot be handled here.  */
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4075
4076 /* Returns the pseudo-register number of an FPA immediate constant,
4077 or FAIL if there isn't a valid constant here. */
4078
4079 static int
4080 parse_fpa_immediate (char ** str)
4081 {
4082 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4083 char * save_in;
4084 expressionS exp;
4085 int i;
4086 int j;
4087
4088 /* First try and match exact strings, this is to guarantee
4089 that some formats will work even for cross assembly. */
4090
4091 for (i = 0; fp_const[i]; i++)
4092 {
4093 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4094 {
4095 char *start = *str;
4096
4097 *str += strlen (fp_const[i]);
4098 if (is_end_of_line[(unsigned char) **str])
4099 return i + 8;
4100 *str = start;
4101 }
4102 }
4103
4104 /* Just because we didn't get a match doesn't mean that the constant
4105 isn't valid, just that it is in a format that we don't
4106 automatically recognize. Try parsing it with the standard
4107 expression routines. */
4108
4109 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4110
4111 /* Look for a raw floating point number. */
4112 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4113 && is_end_of_line[(unsigned char) *save_in])
4114 {
4115 for (i = 0; i < NUM_FLOAT_VALS; i++)
4116 {
4117 for (j = 0; j < MAX_LITTLENUMS; j++)
4118 {
4119 if (words[j] != fp_values[i][j])
4120 break;
4121 }
4122
4123 if (j == MAX_LITTLENUMS)
4124 {
4125 *str = save_in;
4126 return i + 8;
4127 }
4128 }
4129 }
4130
4131 /* Try and parse a more complex expression, this will probably fail
4132 unless the code uses a floating point prefix (eg "0f"). */
4133 save_in = input_line_pointer;
4134 input_line_pointer = *str;
4135 if (expression (&exp) == absolute_section
4136 && exp.X_op == O_big
4137 && exp.X_add_number < 0)
4138 {
4139 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4140 Ditto for 15. */
4141 if (gen_to_words (words, 5, (long) 15) == 0)
4142 {
4143 for (i = 0; i < NUM_FLOAT_VALS; i++)
4144 {
4145 for (j = 0; j < MAX_LITTLENUMS; j++)
4146 {
4147 if (words[j] != fp_values[i][j])
4148 break;
4149 }
4150
4151 if (j == MAX_LITTLENUMS)
4152 {
4153 *str = input_line_pointer;
4154 input_line_pointer = save_in;
4155 return i + 8;
4156 }
4157 }
4158 }
4159 }
4160
4161 *str = input_line_pointer;
4162 input_line_pointer = save_in;
4163 inst.error = _("invalid FPA immediate expression");
4164 return FAIL;
4165 }
4166
4167 /* Returns 1 if a number has "quarter-precision" float format
4168 0baBbbbbbc defgh000 00000000 00000000. */
4169
static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits [30:25] must be 0b011111 or 0b100000, selected by bit 29.  */
  expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
4176
4177 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4178 0baBbbbbbc defgh000 00000000 00000000.
4179 The minus-zero case needs special handling, since it can't be encoded in the
4180 "quarter-precision" float format, but can nonetheless be loaded as an integer
4181 constant. */
4182
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&str, '#');

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* 0x80000000 is IEEE single -0.0, accepted here as a plain
	 integer bit pattern (see the comment above this function).  */
      if (is_quarter_float (fpword) || fpword == 0x80000000)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  /* NOTE(review): the return type is unsigned but FAIL is presumably a
     negative constant, so callers must compare against FAIL rather
     than test for < 0 — confirm against the FAIL definition.  */
  return FAIL;
}
4215
/* Shift operands.  RRX takes no shift amount (see parse_shift, which
   skips operand parsing for SHIFT_RRX).  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* One entry in the shift-mnemonic hash table (arm_shift_hsh).  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  Restricts which shift kinds a
   particular operand position will accept.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
4237
4238 /* Parse a <shift> specifier on an ARM data processing instruction.
4239 This has three forms:
4240
4241 (LSL|LSR|ASL|ASR|ROR) Rs
4242 (LSL|LSR|ASL|ASR|ROR) #imm
4243 RRX
4244
4245 Note that ASL is assimilated to LSL in the instruction encoding, and
4246 RRX to ROR #0 (which cannot be written as such). */
4247
4248 static int
4249 parse_shift (char **str, int i, enum parse_shift_mode mode)
4250 {
4251 const struct asm_shift_name *shift_name;
4252 enum shift_kind shift;
4253 char *s = *str;
4254 char *p = s;
4255 int reg;
4256
4257 for (p = *str; ISALPHA (*p); p++)
4258 ;
4259
4260 if (p == *str)
4261 {
4262 inst.error = _("shift expression expected");
4263 return FAIL;
4264 }
4265
4266 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4267
4268 if (shift_name == NULL)
4269 {
4270 inst.error = _("shift expression expected");
4271 return FAIL;
4272 }
4273
4274 shift = shift_name->kind;
4275
4276 switch (mode)
4277 {
4278 case NO_SHIFT_RESTRICT:
4279 case SHIFT_IMMEDIATE: break;
4280
4281 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4282 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4283 {
4284 inst.error = _("'LSL' or 'ASR' required");
4285 return FAIL;
4286 }
4287 break;
4288
4289 case SHIFT_LSL_IMMEDIATE:
4290 if (shift != SHIFT_LSL)
4291 {
4292 inst.error = _("'LSL' required");
4293 return FAIL;
4294 }
4295 break;
4296
4297 case SHIFT_ASR_IMMEDIATE:
4298 if (shift != SHIFT_ASR)
4299 {
4300 inst.error = _("'ASR' required");
4301 return FAIL;
4302 }
4303 break;
4304
4305 default: abort ();
4306 }
4307
4308 if (shift != SHIFT_RRX)
4309 {
4310 /* Whitespace can appear here if the next thing is a bare digit. */
4311 skip_whitespace (p);
4312
4313 if (mode == NO_SHIFT_RESTRICT
4314 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4315 {
4316 inst.operands[i].imm = reg;
4317 inst.operands[i].immisreg = 1;
4318 }
4319 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4320 return FAIL;
4321 }
4322 inst.operands[i].shift_kind = shift;
4323 inst.operands[i].shifted = 1;
4324 *str = p;
4325 return SUCCESS;
4326 }
4327
4328 /* Parse a <shifter_operand> for an ARM data processing instruction:
4329
4330 #<immediate>
4331 #<immediate>, <rotate>
4332 <Rm>
4333 <Rm>, <shift>
4334
4335 where <shift> is defined by parse_shift above, and <rotate> is a
4336 multiple of 2 between 0 and 30. Validation of immediate operands
4337 is deferred to md_apply_fix. */
4338
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  /* Register form: <Rm> or <Rm>, <shift>.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: #<immediate> [, <rotate>].  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30].  */
      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): when value == 0 this shifts by 32, which is
	 undefined behaviour if X_add_number is a 32-bit type —
	 confirm offsetT is wider than 32 bits on all hosts.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4398
4399 /* Group relocation information. Each entry in the table contains the
4400 textual name of the relocation as may appear in assembler source
4401 and must end with a colon.
4402 Along with this textual name are the relocation codes to be used if
4403 the corresponding instruction is an ALU instruction (ADD or SUB only),
4404 an LDR, an LDRS, or an LDC. */
4405
struct group_reloc_table_entry
{
  /* Relocation name as written in assembler source, without the
     trailing colon.  */
  const char *name;
  /* BFD relocation codes for each instruction class; a zero entry
     means the relocation does not exist for that class.  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
4423
/* Zero entries below mark relocations that do not exist for the
   corresponding instruction class; parse_address_main reports an
   error when such an entry is selected.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
4477
4478 /* Given the address of a pointer pointing to the textual name of a group
4479 relocation as may appear in assembler source, attempt to find its details
4480 in group_reloc_table. The pointer will be updated to the character after
4481 the trailing colon. On failure, FAIL will be returned; SUCCESS
4482 otherwise. On success, *entry will be updated to point at the relevant
4483 group_reloc_table entry. */
4484
4485 static int
4486 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4487 {
4488 unsigned int i;
4489 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4490 {
4491 int length = strlen (group_reloc_table[i].name);
4492
4493 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4494 (*str)[length] == ':')
4495 {
4496 *out = &group_reloc_table[i];
4497 *str += (length + 1);
4498 return SUCCESS;
4499 }
4500 }
4501
4502 return FAIL;
4503 }
4504
4505 /* Parse a <shifter_operand> for an ARM data processing instruction
4506 (as for parse_shifter_operand) where group relocations are allowed:
4507
4508 #<immediate>
4509 #<immediate>, <rotate>
4510 #:<group_reloc>:<expression>
4511 <Rm>
4512 <Rm>, <shift>
4513
4514 where <group_reloc> is one of the strings defined in group_reloc_table.
4515 The hashes are optional.
4516
4517 Everything else is as for parse_shifter_operand. */
4518
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Step over the '#:' (or bare ':') introducer.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
4560
4561 /* Parse all forms of an ARM address expression. Information is written
4562 to inst.operands[i] and/or inst.reloc.
4563
4564 Preindexed addressing (.preind=1):
4565
4566 [Rn, #offset] .reg=Rn .reloc.exp=offset
4567 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4568 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4569 .shift_kind=shift .reloc.exp=shift_imm
4570
4571 These three may have a trailing ! which causes .writeback to be set also.
4572
4573 Postindexed addressing (.postind=1, .writeback=1):
4574
4575 [Rn], #offset .reg=Rn .reloc.exp=offset
4576 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4577 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4578 .shift_kind=shift .reloc.exp=shift_imm
4579
4580 Unindexed addressing (.preind=0, .postind=0):
4581
4582 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4583
4584 Other:
4585
4586 [Rn]{!} shorthand for [Rn,#0]{!}
4587 =immediate .isreg=0 .reloc.exp=immediate
4588 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4589
4590 It is the caller's responsibility to check for addressing modes not
4591 supported by the instruction, and to set inst.reloc.type. */
4592
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" (a load-constant pseudo-op) or a bare
     expression, which becomes a PC-relative reference.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[Rn' — parse the base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Pre-indexed forms: [Rn, ...].  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm], optionally followed by a shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* [Rn, #offset] or [Rn, <group_reloc>:<expr>].  */
	  if (inst.operands[i].negative)
	    {
	      /* The '-' was the sign of a negative immediate, not a
		 register prefix; back up so the expression parser
		 sees it.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations &&
	      ((*p == '#' && *(p + 1) == ':') || *p == ':'))

	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = entry->ldc_code;
		  break;

		default:
		  assert (0);
		}

	      /* Zero table entries mark relocations that do not exist
		 for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback on a pre-indexed form.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed forms: [Rn], #offset or [Rn], +/-Rm.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  /* As above: the '-' starts a negative immediate.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
4808
4809 static int
4810 parse_address (char **str, int i)
4811 {
4812 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4813 ? SUCCESS : FAIL;
4814 }
4815
4816 static parse_operand_result
4817 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4818 {
4819 return parse_address_main (str, i, 1, type);
4820 }
4821
4822 /* Parse an operand for a MOVW or MOVT instruction. */
4823 static int
4824 parse_half (char **str)
4825 {
4826 char * p;
4827
4828 p = *str;
4829 skip_past_char (&p, '#');
4830 if (strncasecmp (p, ":lower16:", 9) == 0)
4831 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4832 else if (strncasecmp (p, ":upper16:", 9) == 0)
4833 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4834
4835 if (inst.reloc.type != BFD_RELOC_UNUSED)
4836 {
4837 p += 9;
4838 skip_whitespace(p);
4839 }
4840
4841 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4842 return FAIL;
4843
4844 if (inst.reloc.type == BFD_RELOC_UNUSED)
4845 {
4846 if (inst.reloc.exp.X_op != O_constant)
4847 {
4848 inst.error = _("constant expression expected");
4849 return FAIL;
4850 }
4851 if (inst.reloc.exp.X_add_number < 0
4852 || inst.reloc.exp.X_add_number > 0xffff)
4853 {
4854 inst.error = _("immediate value out of range");
4855 return FAIL;
4856 }
4857 }
4858 *str = p;
4859 return SUCCESS;
4860 }
4861
4862 /* Miscellaneous. */
4863
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   Accepts "CPSR"/"SPSR" (optionally with a "_<flags>" suffix looked up
   in arm_psr_hsh), or one of the v7-M special-register names looked up
   in arm_v7m_psr_hsh.  */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Not CPSR/SPSR: take the whole identifier and try it as a v7-M
	 special register name (e.g. for MSR/MRS on M-profile).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  /* Skip over the "CPSR"/"SPSR" just matched.  */
  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  Look up the flag combination (e.g. "_cxsf")
	 and OR its encoding into the result.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Bare CPSR/SPSR defaults to the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
4927
4928 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4929 value suitable for splatting into the AIF field of the instruction. */
4930
4931 static int
4932 parse_cps_flags (char **str)
4933 {
4934 int val = 0;
4935 int saw_a_flag = 0;
4936 char *s = *str;
4937
4938 for (;;)
4939 switch (*s++)
4940 {
4941 case '\0': case ',':
4942 goto done;
4943
4944 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4945 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4946 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4947
4948 default:
4949 inst.error = _("unrecognized CPS flag");
4950 return FAIL;
4951 }
4952
4953 done:
4954 if (saw_a_flag == 0)
4955 {
4956 inst.error = _("missing CPS flags");
4957 return FAIL;
4958 }
4959
4960 *str = s - 1;
4961 return val;
4962 }
4963
4964 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4965 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4966
4967 static int
4968 parse_endian_specifier (char **str)
4969 {
4970 int little_endian;
4971 char *s = *str;
4972
4973 if (strncasecmp (s, "BE", 2))
4974 little_endian = 0;
4975 else if (strncasecmp (s, "LE", 2))
4976 little_endian = 1;
4977 else
4978 {
4979 inst.error = _("valid endian specifiers are be or le");
4980 return FAIL;
4981 }
4982
4983 if (ISALNUM (s[2]) || s[2] == '_')
4984 {
4985 inst.error = _("valid endian specifiers are be or le");
4986 return FAIL;
4987 }
4988
4989 *str = s + 2;
4990 return little_endian;
4991 }
4992
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  (Note: the result is returned
   directly; there is no out-parameter.)  */

static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  /* The "ROR" keyword itself is mandatory.  */
  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  /* Map the rotation amount onto the two-bit field encoding.  */
  switch (rot)
    {
    case 0:  *str = s; return 0x0;
    case 8:  *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
5026
5027 /* Parse a conditional code (from conds[] below). The value returned is in the
5028 range 0 .. 14, or FAIL. */
5029 static int
5030 parse_cond (char **str)
5031 {
5032 char *p, *q;
5033 const struct asm_cond *c;
5034
5035 p = q = *str;
5036 while (ISALPHA (*q))
5037 q++;
5038
5039 c = hash_find_n (arm_cond_hsh, p, q - p);
5040 if (!c)
5041 {
5042 inst.error = _("condition required");
5043 return FAIL;
5044 }
5045
5046 *str = q;
5047 return c->value;
5048 }
5049
5050 /* Parse an option for a barrier instruction. Returns the encoding for the
5051 option, or FAIL. */
5052 static int
5053 parse_barrier (char **str)
5054 {
5055 char *p, *q;
5056 const struct asm_barrier_opt *o;
5057
5058 p = q = *str;
5059 while (ISALPHA (*q))
5060 q++;
5061
5062 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5063 if (!o)
5064 return FAIL;
5065
5066 *str = q;
5067 return o->value;
5068 }
5069
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  The form is [Rn, Rm] or [Rn, Rm, LSL #1]; the base register
   goes in inst.operands[0].reg, the index register in
   inst.operands[0].imm.  Returns SUCCESS or FAIL (with inst.error set).  */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  /* Base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  /* Index register — stored in .imm, not a second operand slot.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  /* Optional shift: only an LSL by exactly 1 is accepted.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
5124
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[] and is advanced (via i++) each time an
     operand is committed; the final value is written back through
     WHICH_OPERAND on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
           != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is a vector
	 register (S, D or Q); what follows determines the case.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A Q register cannot pair with an ARM core register.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register: a second core register must follow
		 (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Common error exits.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5346
/* Matcher codes for parse_operands.  NOTE: the order of enumerators is
   significant — parse_operands treats any code >= OP_FIRST_OPTIONAL as
   an optional operand, so all OP_o* codes must stay grouped at the end
   and OP_FIRST_OPTIONAL must alias the first of them.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  /* Combined register/immediate alternatives.  */
  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  All codes from here on are treated as optional
     by parse_operands (the >= OP_FIRST_OPTIONAL test).  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
5466
5467 /* Generic instruction operand parser. This does no encoding and no
5468 semantic validation; it merely squirrels values away in the inst
5469 structure. Returns SUCCESS or FAIL depending on whether the
5470 specified grammar matched. */
5471 static int
5472 parse_operands (char *str, const unsigned char *pattern)
5473 {
5474 unsigned const char *upat = pattern;
5475 char *backtrack_pos = 0;
5476 const char *backtrack_error = 0;
5477 int i, val, backtrack_index = 0;
5478 enum arm_reg_type rtype;
5479 parse_operand_result result;
5480
5481 #define po_char_or_fail(chr) do { \
5482 if (skip_past_char (&str, chr) == FAIL) \
5483 goto bad_args; \
5484 } while (0)
5485
5486 #define po_reg_or_fail(regtype) do { \
5487 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5488 &inst.operands[i].vectype); \
5489 if (val == FAIL) \
5490 { \
5491 first_error (_(reg_expected_msgs[regtype])); \
5492 goto failure; \
5493 } \
5494 inst.operands[i].reg = val; \
5495 inst.operands[i].isreg = 1; \
5496 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5497 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5498 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5499 || rtype == REG_TYPE_VFD \
5500 || rtype == REG_TYPE_NQ); \
5501 } while (0)
5502
5503 #define po_reg_or_goto(regtype, label) do { \
5504 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5505 &inst.operands[i].vectype); \
5506 if (val == FAIL) \
5507 goto label; \
5508 \
5509 inst.operands[i].reg = val; \
5510 inst.operands[i].isreg = 1; \
5511 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5512 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5513 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5514 || rtype == REG_TYPE_VFD \
5515 || rtype == REG_TYPE_NQ); \
5516 } while (0)
5517
5518 #define po_imm_or_fail(min, max, popt) do { \
5519 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5520 goto failure; \
5521 inst.operands[i].imm = val; \
5522 } while (0)
5523
5524 #define po_scalar_or_goto(elsz, label) do { \
5525 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5526 if (val == FAIL) \
5527 goto label; \
5528 inst.operands[i].reg = val; \
5529 inst.operands[i].isscalar = 1; \
5530 } while (0)
5531
5532 #define po_misc_or_fail(expr) do { \
5533 if (expr) \
5534 goto failure; \
5535 } while (0)
5536
5537 #define po_misc_or_fail_no_backtrack(expr) do { \
5538 result = expr; \
5539 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5540 backtrack_pos = 0; \
5541 if (result != PARSE_OPERAND_SUCCESS) \
5542 goto failure; \
5543 } while (0)
5544
5545 skip_whitespace (str);
5546
5547 for (i = 0; upat[i] != OP_stop; i++)
5548 {
5549 if (upat[i] >= OP_FIRST_OPTIONAL)
5550 {
5551 /* Remember where we are in case we need to backtrack. */
5552 assert (!backtrack_pos);
5553 backtrack_pos = str;
5554 backtrack_error = inst.error;
5555 backtrack_index = i;
5556 }
5557
5558 if (i > 0)
5559 po_char_or_fail (',');
5560
5561 switch (upat[i])
5562 {
5563 /* Registers */
5564 case OP_oRRnpc:
5565 case OP_RRnpc:
5566 case OP_oRR:
5567 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5568 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5569 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5570 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5571 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5572 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5573 case OP_oRND:
5574 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5575 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5576 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5577 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5578 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5579 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5580 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5581 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5582 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5583 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5584 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5585 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5586 case OP_oRNQ:
5587 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5588 case OP_oRNDQ:
5589 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5590 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5591 case OP_oRNSDQ:
5592 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5593
5594 /* Neon scalar. Using an element size of 8 means that some invalid
5595 scalars are accepted here, so deal with those in later code. */
5596 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5597
5598 /* WARNING: We can expand to two operands here. This has the potential
5599 to totally confuse the backtracking mechanism! It will be OK at
5600 least as long as we don't try to use optional args as well,
5601 though. */
5602 case OP_NILO:
5603 {
5604 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5605 inst.operands[i].present = 1;
5606 i++;
5607 skip_past_comma (&str);
5608 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5609 break;
5610 one_reg_only:
5611 /* Optional register operand was omitted. Unfortunately, it's in
5612 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5613 here (this is a bit grotty). */
5614 inst.operands[i] = inst.operands[i-1];
5615 inst.operands[i-1].present = 0;
5616 break;
5617 try_imm:
5618 /* There's a possibility of getting a 64-bit immediate here, so
5619 we need special handling. */
5620 if (parse_big_immediate (&str, i) == FAIL)
5621 {
5622 inst.error = _("immediate value is out of range");
5623 goto failure;
5624 }
5625 }
5626 break;
5627
5628 case OP_RNDQ_I0:
5629 {
5630 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5631 break;
5632 try_imm0:
5633 po_imm_or_fail (0, 0, TRUE);
5634 }
5635 break;
5636
5637 case OP_RVSD_I0:
5638 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5639 break;
5640
5641 case OP_RR_RNSC:
5642 {
5643 po_scalar_or_goto (8, try_rr);
5644 break;
5645 try_rr:
5646 po_reg_or_fail (REG_TYPE_RN);
5647 }
5648 break;
5649
5650 case OP_RNSDQ_RNSC:
5651 {
5652 po_scalar_or_goto (8, try_nsdq);
5653 break;
5654 try_nsdq:
5655 po_reg_or_fail (REG_TYPE_NSDQ);
5656 }
5657 break;
5658
5659 case OP_RNDQ_RNSC:
5660 {
5661 po_scalar_or_goto (8, try_ndq);
5662 break;
5663 try_ndq:
5664 po_reg_or_fail (REG_TYPE_NDQ);
5665 }
5666 break;
5667
5668 case OP_RND_RNSC:
5669 {
5670 po_scalar_or_goto (8, try_vfd);
5671 break;
5672 try_vfd:
5673 po_reg_or_fail (REG_TYPE_VFD);
5674 }
5675 break;
5676
5677 case OP_VMOV:
5678 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5679 not careful then bad things might happen. */
5680 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5681 break;
5682
5683 case OP_RNDQ_IMVNb:
5684 {
5685 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5686 break;
5687 try_mvnimm:
5688 /* There's a possibility of getting a 64-bit immediate here, so
5689 we need special handling. */
5690 if (parse_big_immediate (&str, i) == FAIL)
5691 {
5692 inst.error = _("immediate value is out of range");
5693 goto failure;
5694 }
5695 }
5696 break;
5697
5698 case OP_RNDQ_I63b:
5699 {
5700 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5701 break;
5702 try_shimm:
5703 po_imm_or_fail (0, 63, TRUE);
5704 }
5705 break;
5706
5707 case OP_RRnpcb:
5708 po_char_or_fail ('[');
5709 po_reg_or_fail (REG_TYPE_RN);
5710 po_char_or_fail (']');
5711 break;
5712
5713 case OP_RRw:
5714 po_reg_or_fail (REG_TYPE_RN);
5715 if (skip_past_char (&str, '!') == SUCCESS)
5716 inst.operands[i].writeback = 1;
5717 break;
5718
5719 /* Immediates */
5720 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5721 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5722 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5723 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5724 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5725 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5726 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5727 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5728 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5729 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5730 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5731 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5732
5733 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5734 case OP_oI7b:
5735 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5736 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5737 case OP_oI31b:
5738 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5739 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5740 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5741
5742 /* Immediate variants */
5743 case OP_oI255c:
5744 po_char_or_fail ('{');
5745 po_imm_or_fail (0, 255, TRUE);
5746 po_char_or_fail ('}');
5747 break;
5748
5749 case OP_I31w:
5750 /* The expression parser chokes on a trailing !, so we have
5751 to find it first and zap it. */
5752 {
5753 char *s = str;
5754 while (*s && *s != ',')
5755 s++;
5756 if (s[-1] == '!')
5757 {
5758 s[-1] = '\0';
5759 inst.operands[i].writeback = 1;
5760 }
5761 po_imm_or_fail (0, 31, TRUE);
5762 if (str == s - 1)
5763 str = s;
5764 }
5765 break;
5766
5767 /* Expressions */
5768 case OP_EXPi: EXPi:
5769 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5770 GE_OPT_PREFIX));
5771 break;
5772
5773 case OP_EXP:
5774 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5775 GE_NO_PREFIX));
5776 break;
5777
5778 case OP_EXPr: EXPr:
5779 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5780 GE_NO_PREFIX));
5781 if (inst.reloc.exp.X_op == O_symbol)
5782 {
5783 val = parse_reloc (&str);
5784 if (val == -1)
5785 {
5786 inst.error = _("unrecognized relocation suffix");
5787 goto failure;
5788 }
5789 else if (val != BFD_RELOC_UNUSED)
5790 {
5791 inst.operands[i].imm = val;
5792 inst.operands[i].hasreloc = 1;
5793 }
5794 }
5795 break;
5796
5797 /* Operand for MOVW or MOVT. */
5798 case OP_HALF:
5799 po_misc_or_fail (parse_half (&str));
5800 break;
5801
5802 /* Register or expression */
5803 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5804 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5805
5806 /* Register or immediate */
5807 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5808 I0: po_imm_or_fail (0, 0, FALSE); break;
5809
5810 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5811 IF:
5812 if (!is_immediate_prefix (*str))
5813 goto bad_args;
5814 str++;
5815 val = parse_fpa_immediate (&str);
5816 if (val == FAIL)
5817 goto failure;
5818 /* FPA immediates are encoded as registers 8-15.
5819 parse_fpa_immediate has already applied the offset. */
5820 inst.operands[i].reg = val;
5821 inst.operands[i].isreg = 1;
5822 break;
5823
5824 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5825 I32z: po_imm_or_fail (0, 32, FALSE); break;
5826
5827 /* Two kinds of register */
5828 case OP_RIWR_RIWC:
5829 {
5830 struct reg_entry *rege = arm_reg_parse_multi (&str);
5831 if (!rege
5832 || (rege->type != REG_TYPE_MMXWR
5833 && rege->type != REG_TYPE_MMXWC
5834 && rege->type != REG_TYPE_MMXWCG))
5835 {
5836 inst.error = _("iWMMXt data or control register expected");
5837 goto failure;
5838 }
5839 inst.operands[i].reg = rege->number;
5840 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5841 }
5842 break;
5843
5844 case OP_RIWC_RIWG:
5845 {
5846 struct reg_entry *rege = arm_reg_parse_multi (&str);
5847 if (!rege
5848 || (rege->type != REG_TYPE_MMXWC
5849 && rege->type != REG_TYPE_MMXWCG))
5850 {
5851 inst.error = _("iWMMXt control register expected");
5852 goto failure;
5853 }
5854 inst.operands[i].reg = rege->number;
5855 inst.operands[i].isreg = 1;
5856 }
5857 break;
5858
5859 /* Misc */
5860 case OP_CPSF: val = parse_cps_flags (&str); break;
5861 case OP_ENDI: val = parse_endian_specifier (&str); break;
5862 case OP_oROR: val = parse_ror (&str); break;
5863 case OP_PSR: val = parse_psr (&str); break;
5864 case OP_COND: val = parse_cond (&str); break;
5865 case OP_oBARRIER:val = parse_barrier (&str); break;
5866
5867 case OP_RVC_PSR:
5868 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5869 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5870 break;
5871 try_psr:
5872 val = parse_psr (&str);
5873 break;
5874
5875 case OP_APSR_RR:
5876 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5877 break;
5878 try_apsr:
5879 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5880 instruction). */
5881 if (strncasecmp (str, "APSR_", 5) == 0)
5882 {
5883 unsigned found = 0;
5884 str += 5;
5885 while (found < 15)
5886 switch (*str++)
5887 {
5888 case 'c': found = (found & 1) ? 16 : found | 1; break;
5889 case 'n': found = (found & 2) ? 16 : found | 2; break;
5890 case 'z': found = (found & 4) ? 16 : found | 4; break;
5891 case 'v': found = (found & 8) ? 16 : found | 8; break;
5892 default: found = 16;
5893 }
5894 if (found != 15)
5895 goto failure;
5896 inst.operands[i].isvec = 1;
5897 }
5898 else
5899 goto failure;
5900 break;
5901
5902 case OP_TB:
5903 po_misc_or_fail (parse_tb (&str));
5904 break;
5905
5906 /* Register lists */
5907 case OP_REGLST:
5908 val = parse_reg_list (&str);
5909 if (*str == '^')
5910 {
5911 inst.operands[1].writeback = 1;
5912 str++;
5913 }
5914 break;
5915
5916 case OP_VRSLST:
5917 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5918 break;
5919
5920 case OP_VRDLST:
5921 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5922 break;
5923
5924 case OP_VRSDLST:
5925 /* Allow Q registers too. */
5926 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5927 REGLIST_NEON_D);
5928 if (val == FAIL)
5929 {
5930 inst.error = NULL;
5931 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5932 REGLIST_VFP_S);
5933 inst.operands[i].issingle = 1;
5934 }
5935 break;
5936
5937 case OP_NRDLST:
5938 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5939 REGLIST_NEON_D);
5940 break;
5941
5942 case OP_NSTRLST:
5943 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5944 &inst.operands[i].vectype);
5945 break;
5946
5947 /* Addressing modes */
5948 case OP_ADDR:
5949 po_misc_or_fail (parse_address (&str, i));
5950 break;
5951
5952 case OP_ADDRGLDR:
5953 po_misc_or_fail_no_backtrack (
5954 parse_address_group_reloc (&str, i, GROUP_LDR));
5955 break;
5956
5957 case OP_ADDRGLDRS:
5958 po_misc_or_fail_no_backtrack (
5959 parse_address_group_reloc (&str, i, GROUP_LDRS));
5960 break;
5961
5962 case OP_ADDRGLDC:
5963 po_misc_or_fail_no_backtrack (
5964 parse_address_group_reloc (&str, i, GROUP_LDC));
5965 break;
5966
5967 case OP_SH:
5968 po_misc_or_fail (parse_shifter_operand (&str, i));
5969 break;
5970
5971 case OP_SHG:
5972 po_misc_or_fail_no_backtrack (
5973 parse_shifter_operand_group_reloc (&str, i));
5974 break;
5975
5976 case OP_oSHll:
5977 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5978 break;
5979
5980 case OP_oSHar:
5981 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5982 break;
5983
5984 case OP_oSHllar:
5985 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5986 break;
5987
5988 default:
5989 as_fatal ("unhandled operand code %d", upat[i]);
5990 }
5991
5992 /* Various value-based sanity checks and shared operations. We
5993 do not signal immediate failures for the register constraints;
5994 this allows a syntax error to take precedence. */
5995 switch (upat[i])
5996 {
5997 case OP_oRRnpc:
5998 case OP_RRnpc:
5999 case OP_RRnpcb:
6000 case OP_RRw:
6001 case OP_RRnpc_I0:
6002 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6003 inst.error = BAD_PC;
6004 break;
6005
6006 case OP_CPSF:
6007 case OP_ENDI:
6008 case OP_oROR:
6009 case OP_PSR:
6010 case OP_RVC_PSR:
6011 case OP_COND:
6012 case OP_oBARRIER:
6013 case OP_REGLST:
6014 case OP_VRSLST:
6015 case OP_VRDLST:
6016 case OP_VRSDLST:
6017 case OP_NRDLST:
6018 case OP_NSTRLST:
6019 if (val == FAIL)
6020 goto failure;
6021 inst.operands[i].imm = val;
6022 break;
6023
6024 default:
6025 break;
6026 }
6027
6028 /* If we get here, this operand was successfully parsed. */
6029 inst.operands[i].present = 1;
6030 continue;
6031
6032 bad_args:
6033 inst.error = BAD_ARGS;
6034
6035 failure:
6036 if (!backtrack_pos)
6037 {
6038 /* The parse routine should already have set inst.error, but set a
6039 defaut here just in case. */
6040 if (!inst.error)
6041 inst.error = _("syntax error");
6042 return FAIL;
6043 }
6044
6045 /* Do not backtrack over a trailing optional argument that
6046 absorbed some text. We will only fail again, with the
6047 'garbage following instruction' error message, which is
6048 probably less helpful than the current one. */
6049 if (backtrack_index == i && backtrack_pos != str
6050 && upat[i+1] == OP_stop)
6051 {
6052 if (!inst.error)
6053 inst.error = _("syntax error");
6054 return FAIL;
6055 }
6056
6057 /* Try again, skipping the optional argument at backtrack_pos. */
6058 str = backtrack_pos;
6059 inst.error = backtrack_error;
6060 inst.operands[backtrack_index].present = 0;
6061 i = backtrack_index;
6062 backtrack_pos = 0;
6063 }
6064
6065 /* Check that we have parsed all the arguments. */
6066 if (*str != '\0' && !inst.error)
6067 inst.error = _("garbage following instruction");
6068
6069 return inst.error ? FAIL : SUCCESS;
6070 }
6071
6072 #undef po_char_or_fail
6073 #undef po_reg_or_fail
6074 #undef po_reg_or_goto
6075 #undef po_imm_or_fail
6076 #undef po_scalar_or_fail
6077 \f
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if true, records ERR in inst.error and returns from
   the *calling* function.  Note the hidden 'return': this macro is
   only usable inside functions returning void.  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)
6086
6087 /* Functions for operand encoding. ARM, then Thumb. */
6088
/* Rotate the 32-bit value V left by N bits.  NOTE: the expansion
   right-shifts by (32 - n), which is undefined behaviour when n == 0;
   callers must handle the unrotated case separately.  */
#define rotate_left(v, n) (v << n | v >> (32 - n))
6090
6091 /* If VAL can be encoded in the immediate field of an ARM instruction,
6092 return the encoded form. Otherwise, return FAIL. */
6093
6094 static unsigned int
6095 encode_arm_immediate (unsigned int val)
6096 {
6097 unsigned int a, i;
6098
6099 for (i = 0; i < 32; i += 2)
6100 if ((a = rotate_left (val, i)) <= 0xff)
6101 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6102
6103 return FAIL;
6104 }
6105
6106 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6107 return the encoded form. Otherwise, return FAIL. */
6108 static unsigned int
6109 encode_thumb32_immediate (unsigned int val)
6110 {
6111 unsigned int a, i;
6112
6113 if (val <= 0xff)
6114 return val;
6115
6116 for (i = 1; i <= 24; i++)
6117 {
6118 a = val >> i;
6119 if ((val & ~(0xff << i)) == 0)
6120 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6121 }
6122
6123 a = val & 0xff;
6124 if (val == ((a << 16) | a))
6125 return 0x100 | a;
6126 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6127 return 0x300 | a;
6128
6129 a = val & 0xff00;
6130 if (val == ((a << 16) | a))
6131 return 0x200 | (a >> 8);
6132
6133 return FAIL;
6134 }
/* Encode a VFP SP or DP register number (REG) into inst.instruction,
   in the field selected by POS.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers above 15 require VFPv3; record the feature use, or
     diagnose its absence.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Each field splits the register number into a 4-bit group plus a
     single extension bit, placed differently per position.  */
  switch (pos)
    {
    case VFP_REG_Sd:	/* Sd: top 4 bits at 15:12, LSB at bit 22.  */
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:	/* Sn: top 4 bits at 19:16, LSB at bit 7.  */
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:	/* Sm: top 4 bits at 3:0, LSB at bit 5.  */
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:	/* Dd: low 4 bits at 15:12, bit 4 at bit 22.  */
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:	/* Dn: low 4 bits at 19:16, bit 4 at bit 7.  */
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:	/* Dm: low 4 bits at 3:0, bit 4 at bit 5.  */
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
6189
6190 /* Encode a <shift> in an ARM-format instruction. The immediate,
6191 if any, is handled by md_apply_fix. */
6192 static void
6193 encode_arm_shift (int i)
6194 {
6195 if (inst.operands[i].shift_kind == SHIFT_RRX)
6196 inst.instruction |= SHIFT_ROR << 5;
6197 else
6198 {
6199 inst.instruction |= inst.operands[i].shift_kind << 5;
6200 if (inst.operands[i].immisreg)
6201 {
6202 inst.instruction |= SHIFT_BY_REG;
6203 inst.instruction |= inst.operands[i].imm << 8;
6204 }
6205 else
6206 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6207 }
6208 }
6209
6210 static void
6211 encode_arm_shifter_operand (int i)
6212 {
6213 if (inst.operands[i].isreg)
6214 {
6215 inst.instruction |= inst.operands[i].reg;
6216 encode_arm_shift (i);
6217 }
6218 else
6219 inst.instruction |= INST_IMMEDIATE;
6220 }
6221
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and the indexing/writeback bits of
   operand I.  If IS_T, only post-indexed forms are accepted and the
   W bit selects the T variant.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;	/* Rn.  */

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base will be written back (post-indexed, or
     pre-indexed with W set) and the transfer register in bits 15:12
     is the same as the base in bits 19:16.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6260
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;	/* Rm in bits 3:0.  */
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with a zero shift count.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is applied later through this reloc.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      /* Leave the offset to the fixup machinery unless a specific
	 relocation (e.g. a group reloc) was already chosen.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
6293
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is invalid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset in bits 3:0; U bit selects the direction.  */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      /* Leave the offset to the fixup machinery unless a specific
	 relocation was already chosen.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
6323
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS, or FAIL with
   inst.error set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;	/* Rn.  */

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed: the immediate goes in the offset field, U set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the offset relocation: honour an explicit override, and
     preserve group relocations already selected by the parser.  */
  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6384
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit that matches the encoding in use (16-bit Thumb,
     32-bit Thumb, or ARM).  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  /* Try MOV with a rotatable immediate, then MVN with the
	     bitwise complement of the constant.  */
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  /* No move will do; put the value in the literal pool and rewrite
     operand 1 as a PC-relative reference to it.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
6466
6467 /* Functions for instruction encoding, sorted by subarchitecture.
6468 First some generics; their names are taken from the conventional
6469 bit positions for register arguments in ARM format instructions. */
6470
/* No encoding needed beyond the base opcode from the insns[] table.  */
static void
do_noargs (void)
{
}
6475
/* Encode a single destination register, Rd, in bits 15:12.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
6481
6482 static void
6483 do_rd_rm (void)
6484 {
6485 inst.instruction |= inst.operands[0].reg << 12;
6486 inst.instruction |= inst.operands[1].reg;
6487 }
6488
6489 static void
6490 do_rd_rn (void)
6491 {
6492 inst.instruction |= inst.operands[0].reg << 12;
6493 inst.instruction |= inst.operands[1].reg << 16;
6494 }
6495
6496 static void
6497 do_rn_rd (void)
6498 {
6499 inst.instruction |= inst.operands[0].reg << 16;
6500 inst.instruction |= inst.operands[1].reg << 12;
6501 }
6502
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16).  For the SWP opcode
   pattern, Rn must not overlap either of the other operands.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		_("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= Rn << 16;				/* Rn.  */
}
6515
6516 static void
6517 do_rd_rn_rm (void)
6518 {
6519 inst.instruction |= inst.operands[0].reg << 12;
6520 inst.instruction |= inst.operands[1].reg << 16;
6521 inst.instruction |= inst.operands[2].reg;
6522 }
6523
6524 static void
6525 do_rm_rd_rn (void)
6526 {
6527 inst.instruction |= inst.operands[0].reg;
6528 inst.instruction |= inst.operands[1].reg << 12;
6529 inst.instruction |= inst.operands[2].reg << 16;
6530 }
6531
/* Encode a single immediate operand into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
6537
/* Encode Rd (bits 15:12) plus a coprocessor address operand,
   allowing both writeback and unindexed addressing.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
6544
6545 /* ARM instructions, in alphabetical order by function name (except
6546 that wrapper functions appear immediately after the function they
6547 wrap). */
6548
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for PC reading as '.' + 8 in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
6563
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The pseudo expands to two instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for PC reading as '.' + 8 in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
6581
6582 static void
6583 do_arit (void)
6584 {
6585 if (!inst.operands[1].present)
6586 inst.operands[1].reg = inst.operands[0].reg;
6587 inst.instruction |= inst.operands[0].reg << 12;
6588 inst.instruction |= inst.operands[1].reg << 16;
6589 encode_arm_shifter_operand (2);
6590 }
6591
6592 static void
6593 do_barrier (void)
6594 {
6595 if (inst.operands[0].present)
6596 {
6597 constraint ((inst.instruction & 0xf0) != 0x40
6598 && inst.operands[0].imm != 0xf,
6599 "bad barrier type");
6600 inst.instruction |= inst.operands[0].imm;
6601 }
6602 else
6603 inst.instruction |= 0xf;
6604 }
6605
/* BFC Rd, #lsb, #width: clear a bit-field.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].imm << 7;	/* LSB.  */
  inst.instruction |= (msb - 1) << 16;			/* MSB.  */
}
6617
/* BFI Rd, Rm, #lsb, #width: bit-field insert.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].imm << 7;	/* LSB.  */
  inst.instruction |= (msb - 1) << 16;			/* MSB.  */
}
6637
/* Bit-field extract: Rd, Rn, #lsb, #width.  The encoding stores the
   LSB and (width - 1).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rn.  */
  inst.instruction |= inst.operands[2].imm << 7;	/* LSB.  */
  inst.instruction |= (inst.operands[3].imm - 1) << 16;	/* Width - 1.  */
}
6648
6649 /* ARM V5 breakpoint instruction (argument parse)
6650 BKPT <16 bit unsigned immediate>
6651 Instruction is not conditional.
6652 The bit pattern given in insns[] has the COND_ALWAYS condition,
6653 and it is an error if the caller tried to override that. */
6654
6655 static void
6656 do_bkpt (void)
6657 {
6658 /* Top 12 of 16 bits to bits 19:8. */
6659 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6660
6661 /* Bottom 4 of 16 bits to bits 3:0. */
6662 inst.instruction |= inst.operands[0].imm & 0xf;
6663 }
6664
6665 static void
6666 encode_branch (int default_reloc)
6667 {
6668 if (inst.operands[0].hasreloc)
6669 {
6670 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6671 _("the only suffix valid here is '(plt)'"));
6672 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6673 }
6674 else
6675 {
6676 inst.reloc.type = default_reloc;
6677 }
6678 inst.reloc.pc_rel = 1;
6679 }
6680
/* B: plain branch.  EABI v4+ objects use the distinct PCREL_JUMP
   relocation; everything else uses PCREL_BRANCH.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6691
/* BL: branch with link.  For EABI v4+, an unconditional BL gets the
   PCREL_CALL relocation and a conditional one PCREL_JUMP; older
   objects use PCREL_BRANCH.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6707
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>	ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
   into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      /* EABI v4+ objects use PCREL_CALL; older objects PCREL_BLX.  */
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
	encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
6742
6743 static void
6744 do_bx (void)
6745 {
6746 if (inst.operands[0].reg == REG_PC)
6747 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6748
6749 inst.instruction |= inst.operands[0].reg;
6750 }
6751
6752
6753 /* ARM v5TEJ. Jump to Jazelle code. */
6754
6755 static void
6756 do_bxj (void)
6757 {
6758 if (inst.operands[0].reg == REG_PC)
6759 as_tsktsk (_("use of r15 in bxj is not really useful"));
6760
6761 inst.instruction |= inst.operands[0].reg;
6762 }
6763
6764 /* Co-processor data operation:
6765 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6766 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6767 static void
6768 do_cdp (void)
6769 {
6770 inst.instruction |= inst.operands[0].reg << 8;
6771 inst.instruction |= inst.operands[1].imm << 20;
6772 inst.instruction |= inst.operands[2].reg << 12;
6773 inst.instruction |= inst.operands[3].reg << 16;
6774 inst.instruction |= inst.operands[4].reg;
6775 inst.instruction |= inst.operands[5].imm << 5;
6776 }
6777
6778 static void
6779 do_cmp (void)
6780 {
6781 inst.instruction |= inst.operands[0].reg << 16;
6782 encode_arm_shifter_operand (1);
6783 }
6784
6785 /* Transfer between coprocessor and ARM registers.
6786 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6787 MRC2
6788 MCR{cond}
6789 MCR2
6790
6791 No special properties. */
6792
6793 static void
6794 do_co_reg (void)
6795 {
6796 inst.instruction |= inst.operands[0].reg << 8;
6797 inst.instruction |= inst.operands[1].imm << 21;
6798 inst.instruction |= inst.operands[2].reg << 12;
6799 inst.instruction |= inst.operands[3].reg << 16;
6800 inst.instruction |= inst.operands[4].reg;
6801 inst.instruction |= inst.operands[5].imm << 5;
6802 }
6803
6804 /* Transfer between coprocessor register and pair of ARM registers.
6805 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6806 MCRR2
6807 MRRC{cond}
6808 MRRC2
6809
6810 Two XScale instructions are special cases of these:
6811
6812 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6813 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6814
6815 Result unpredicatable if Rd or Rn is R15. */
6816
6817 static void
6818 do_co_reg2c (void)
6819 {
6820 inst.instruction |= inst.operands[0].reg << 8;
6821 inst.instruction |= inst.operands[1].imm << 4;
6822 inst.instruction |= inst.operands[2].reg << 12;
6823 inst.instruction |= inst.operands[3].reg << 16;
6824 inst.instruction |= inst.operands[4].reg;
6825 }
6826
6827 static void
6828 do_cpsi (void)
6829 {
6830 inst.instruction |= inst.operands[0].imm << 6;
6831 inst.instruction |= inst.operands[1].imm;
6832 }
6833
/* DBG: encode the hint option immediate in the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
6839
/* IT (if-then).  */
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it but do not generate code for it.  */
  inst.size = 0;
}
6847
/* LDM/STM: encode base register, register-list bitmask and writeback,
   warning about the UNPREDICTABLE writeback combinations.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register-list bitmask.  */

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' after the register list selects the type 2/3 forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
6886
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  /* The first register must be even; the second, if written at all,
     must be exactly the following register.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
6929
/* LDREX Rd, [Rn]: the address must be a bare, pre-indexed register
   with a zero offset and no index, shift or writeback.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  /* The offset has been verified to be zero; discard the reloc.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
6959
/* LDREXD Rd, Rd2, [Rn]: Rd must be even and not r14; Rd2, if given,
   must be Rd + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
}
6975
6976 static void
6977 do_ldst (void)
6978 {
6979 inst.instruction |= inst.operands[0].reg << 12;
6980 if (!inst.operands[1].isreg)
6981 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6982 return;
6983 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6984 }
6985
6986 static void
6987 do_ldstt (void)
6988 {
6989 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6990 reject [Rn,...]. */
6991 if (inst.operands[1].preind)
6992 {
6993 constraint (inst.reloc.exp.X_op != O_constant ||
6994 inst.reloc.exp.X_add_number != 0,
6995 _("this instruction requires a post-indexed address"));
6996
6997 inst.operands[1].preind = 0;
6998 inst.operands[1].postind = 1;
6999 inst.operands[1].writeback = 1;
7000 }
7001 inst.instruction |= inst.operands[0].reg << 12;
7002 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7003 }
7004
7005 /* Halfword and signed-byte load/store operations. */
7006
7007 static void
7008 do_ldstv4 (void)
7009 {
7010 inst.instruction |= inst.operands[0].reg << 12;
7011 if (!inst.operands[1].isreg)
7012 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7013 return;
7014 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7015 }
7016
7017 static void
7018 do_ldsttv4 (void)
7019 {
7020 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7021 reject [Rn,...]. */
7022 if (inst.operands[1].preind)
7023 {
7024 constraint (inst.reloc.exp.X_op != O_constant ||
7025 inst.reloc.exp.X_add_number != 0,
7026 _("this instruction requires a post-indexed address"));
7027
7028 inst.operands[1].preind = 0;
7029 inst.operands[1].postind = 1;
7030 inst.operands[1].writeback = 1;
7031 }
7032 inst.instruction |= inst.operands[0].reg << 12;
7033 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7034 }
7035
7036 /* Co-processor register load/store.
7037 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coprocessor number */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7045
7046 static void
7047 do_mlas (void)
7048 {
7049 /* This restriction does not apply to mls (nor to mla in v6, but
7050 that's hard to detect at present). */
7051 if (inst.operands[0].reg == inst.operands[1].reg
7052 && !(inst.instruction & 0x00400000))
7053 as_tsktsk (_("rd and rm should be different in mla"));
7054
7055 inst.instruction |= inst.operands[0].reg << 16;
7056 inst.instruction |= inst.operands[1].reg;
7057 inst.instruction |= inst.operands[2].reg << 8;
7058 inst.instruction |= inst.operands[3].reg << 12;
7059
7060 }
7061
/* Move-style data-processing instruction: destination Rd in bits
   12-15, operand 1 encoded as a shifter operand.  */
static void
do_mov (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
7068
7069 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7070 static void
7071 do_mov16 (void)
7072 {
7073 bfd_vma imm;
7074 bfd_boolean top;
7075
7076 top = (inst.instruction & 0x00400000) != 0;
7077 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7078 _(":lower16: not allowed this instruction"));
7079 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7080 _(":upper16: not allowed instruction"));
7081 inst.instruction |= inst.operands[0].reg << 12;
7082 if (inst.reloc.type == BFD_RELOC_UNUSED)
7083 {
7084 imm = inst.reloc.exp.X_add_number;
7085 /* The value is in two pieces: 0:11, 16:19. */
7086 inst.instruction |= (imm & 0x00000fff);
7087 inst.instruction |= (imm & 0x0000f000) << 4;
7088 }
7089 }
7090
7091 static void do_vfp_nsyn_opcode (const char *);
7092
/* Handle the VFP forms of MRS: returns SUCCESS if one applied,
   FAIL if the generic (CPSR/SPSR) handling should proceed.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* "mrs APSR_nzcv, FPSCR" style: only FPSCR (reg 1) is legal;
	 reassemble as fmstat with no operands.  */
      if (inst.operands[1].reg != 1)
        first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    /* Core register destination, VFP system register source.  */
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
7111
7112 static int
7113 do_vfp_nsyn_msr (void)
7114 {
7115 if (inst.operands[0].isvec)
7116 do_vfp_nsyn_opcode ("fmxr");
7117 else
7118 return FAIL;
7119
7120 return SUCCESS;
7121 }
7122
static void
do_mrs (void)
{
  /* VFP variants are dispatched first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);	/* R bit */
}
7136
7137 /* Two possible forms:
7138 "{C|S}PSR_<field>, Rm",
7139 "{C|S}PSR_f, #expression". */
7140
static void
do_msr (void)
{
  /* VFP variants are dispatched first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the pre-encoded PSR field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to fixup processing.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
7157
7158 static void
7159 do_mul (void)
7160 {
7161 if (!inst.operands[2].present)
7162 inst.operands[2].reg = inst.operands[0].reg;
7163 inst.instruction |= inst.operands[0].reg << 16;
7164 inst.instruction |= inst.operands[1].reg;
7165 inst.instruction |= inst.operands[2].reg << 8;
7166
7167 if (inst.operands[0].reg == inst.operands[1].reg)
7168 as_tsktsk (_("rd and rm should be different in mul"));
7169 }
7170
7171 /* Long Multiply Parser
7172 UMULL RdLo, RdHi, Rm, Rs
7173 SMULL RdLo, RdHi, Rm, Rs
7174 UMLAL RdLo, RdHi, Rm, Rs
7175 SMLAL RdLo, RdHi, Rm, Rs. */
7176
7177 static void
7178 do_mull (void)
7179 {
7180 inst.instruction |= inst.operands[0].reg << 12;
7181 inst.instruction |= inst.operands[1].reg << 16;
7182 inst.instruction |= inst.operands[2].reg;
7183 inst.instruction |= inst.operands[3].reg << 8;
7184
7185 /* rdhi, rdlo and rm must all be different. */
7186 if (inst.operands[0].reg == inst.operands[1].reg
7187 || inst.operands[0].reg == inst.operands[2].reg
7188 || inst.operands[1].reg == inst.operands[2].reg)
7189 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7190 }
7191
7192 static void
7193 do_nop (void)
7194 {
7195 if (inst.operands[0].present)
7196 {
7197 /* Architectural NOP hints are CPSR sets with no bits selected. */
7198 inst.instruction &= 0xf0000000;
7199 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7200 }
7201 }
7202
7203 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7204 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7205 Condition defaults to COND_ALWAYS.
7206 Error if Rd, Rn or Rm are R15. */
7207
static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn */
  inst.instruction |= inst.operands[2].reg;		/* Rm */
  /* Optional LSL #imm shift on Rm.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7217
7218 /* ARM V6 PKHTB (Argument Parse). */
7219
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap positions.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* PKHTB proper: Rd, Rn, Rm with an ASR shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
7240
7241 /* ARMv5TE: Preload-Cache
7242
7243 PLD <addr_mode>
7244
7245 Syntactically, like LDR with B=1, W=0, L=1. */
7246
static void
do_pld (void)
{
  /* Only a pre-indexed, non-writeback address is architecturally
     valid for PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
7260
7261 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI encodes with the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
7276
7277 static void
7278 do_push_pop (void)
7279 {
7280 inst.operands[1] = inst.operands[0];
7281 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7282 inst.operands[0].isreg = 1;
7283 inst.operands[0].writeback = 1;
7284 inst.operands[0].reg = REG_SP;
7285 do_ldmstm ();
7286 }
7287
7288 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7289 word at the specified address and the following word
7290 respectively.
7291 Unconditionally executed.
7292 Error if Rn is R15. */
7293
static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rn */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
7301
7302 /* ARM V6 ssat (argument parse). */
7303
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  /* Saturation width is encoded as imm-1 in bits 16-20.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;		/* Rm */

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7314
7315 /* ARM V6 usat (argument parse). */
7316
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  /* Unlike SSAT, the width is encoded directly (not imm-1).  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;		/* Rm */

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
7327
7328 /* ARM V6 ssat16 (argument parse). */
7329
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  /* Width encoded as imm-1, matching do_ssat.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;		/* Rm */
}
7337
/* ARM V6 usat16 (argument parse).  Width encoded directly,
   matching do_usat.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;		/* Rm */
}
7345
7346 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7347 preserving the other bits.
7348
7349 setend <endian_specifier>, where <endian_specifier> is either
7350 BE or LE. */
7351
static void
do_setend (void)
{
  /* Operand 0 is nonzero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
7358
7359 static void
7360 do_shift (void)
7361 {
7362 unsigned int Rm = (inst.operands[1].present
7363 ? inst.operands[1].reg
7364 : inst.operands[0].reg);
7365
7366 inst.instruction |= inst.operands[0].reg << 12;
7367 inst.instruction |= Rm;
7368 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7369 {
7370 inst.instruction |= inst.operands[2].reg << 8;
7371 inst.instruction |= SHIFT_BY_REG;
7372 }
7373 else
7374 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7375 }
7376
/* SMC: the immediate is handled entirely by the relocation.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
7383
/* SWI/SVC: the comment field is handled entirely by the relocation.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
7390
7391 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7392 SMLAxy{cond} Rd,Rm,Rs,Rn
7393 SMLAWy{cond} Rd,Rm,Rs,Rn
7394 Error if any register is R15. */
7395
static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs */
  inst.instruction |= inst.operands[3].reg << 12;	/* Rn */
}
7404
7405 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7406 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7407 Error if any register is R15.
7408 Warning if Rdlo == Rdhi. */
7409
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi */
  inst.instruction |= inst.operands[2].reg;		/* Rm */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
7421
7422 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7423 SMULxy{cond} Rd,Rm,Rs
7424 Error if any register is R15. */
7425
static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs */
}
7433
7434 /* ARM V6 srs (argument parse). */
7435
static void
do_srs (void)
{
  /* Operand 0's imm carries the mode number.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
7443
7444 /* ARM V6 strex (argument parse). */
7445
static void
do_strex (void)
{
  /* STREX Rd, Rm, [Rn]: only a plain, non-writeback address with
     zero offset is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must differ from both Rm and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
7469
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be even and Rt2, if given,
     must be Rt+1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Status register Rd must not overlap the source pair or base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rt */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn */
}
7491
7492 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7493 extends it to 32-bits, and adds the result to a value in another
7494 register. You can specify a rotation by 0, 8, 16, or 24 bits
7495 before extracting the 16-bit value.
7496 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7497 Condition defaults to COND_ALWAYS.
7498 Error if any register uses R15. */
7499
static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn */
  inst.instruction |= inst.operands[2].reg;		/* Rm */
  /* Rotation (already divided down during parse) in bits 10-11.  */
  inst.instruction |= inst.operands[3].imm << 10;
}
7508
7509 /* ARM V6 SXTH.
7510
7511 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7512 Condition defaults to COND_ALWAYS.
7513 Error if any register uses R15. */
7514
static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  /* Rotation in bits 10-11.  */
  inst.instruction |= inst.operands[2].imm << 10;
}
7522 \f
7523 /* VFP instructions. In a logical order: SP variant first, monad
7524 before dyad, arithmetic then move then load/store. */
7525
/* Single-precision monadic operation: Sd <- op Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
7532
/* Single-precision dyadic operation: Sd <- Sn op Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
7540
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
7546
/* Conversion with double destination, single source: Dd <- Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
7553
/* Conversion with single destination, double source: Sd <- Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
7560
/* Core register from VFP single: Rd (bits 12-15) <- Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
7567
/* Two core registers from a pair of VFP singles (fmrrs).  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
7577
/* VFP single from core register: Sn <- Rd (bits 12-15).  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
7584
/* Pair of VFP singles from two core registers (fmsrr).  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn */
}
7594
/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
7601
/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
7608
7609
7610 static void
7611 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7612 {
7613 if (inst.operands[0].writeback)
7614 inst.instruction |= WRITE_BACK;
7615 else
7616 constraint (ldstm_type != VFP_LDSTMIA,
7617 _("this addressing mode requires base-register writeback"));
7618 inst.instruction |= inst.operands[0].reg << 16;
7619 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7620 inst.instruction |= inst.operands[1].imm;
7621 }
7622
/* Common worker for the double-precision FLDM/FSTM variants,
   including the FPA-compatible X forms.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X forms add one extra word.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
7643
/* Wrapper: single-precision load/store multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
7649
/* Wrapper: single-precision load/store multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
7655
/* Wrapper: double-precision load/store multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
7661
/* Wrapper: double-precision load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
7667
/* Wrapper: FPA-compatible X form, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
7673
/* Wrapper: FPA-compatible X form, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
7679
/* Double-precision two-operand: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
7686
/* Double-precision two-operand: Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
7693
/* Double-precision two-operand: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
7700
/* Double-precision three-operand: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
7708
/* Double-precision single-operand: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
7714
/* Double-precision three-operand: Dm, Dd, Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
7722
7723 /* VFPv3 instructions. */
/* VFPv3 single-precision immediate: the 8-bit encoded constant is
   split into a high nibble (bits 16-19) and a low nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
7731
/* VFPv3 double-precision immediate: same nibble split as the
   single-precision form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
7739
/* VFPv3 fixed-point conversion: encode srcsize - fbits, with the low
   bit of that value in bit 5 and the remaining bits in bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
7747
/* 16-bit fixed-point conversion, single-precision register.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
7754
/* 16-bit fixed-point conversion, double-precision register.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
7761
/* 32-bit fixed-point conversion, single-precision register.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
7768
/* 32-bit fixed-point conversion, double-precision register.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
7775
7776 \f
7777 /* FPA instructions. Also in a logical order. */
7778
/* FPA compare: operand 0 in bits 16-19, operand 1 in bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
7785
/* FPA LFM/SFM: operand 0 is the first F register, operand 1 the
   register count (1-4, encoded via CP_T_X/CP_T_Y), operand 2 the
   address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;	/* count of 4 is the all-bits-clear encoding */
    default: abort ();	/* parser guarantees 1-4 */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each register is 12 bytes; synthesize the stack offset.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack writeback becomes a post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7824
7825 \f
7826 /* iWMMXt instructions: strictly in alphabetical order. */
7827
/* TANDC/TORC: the destination must be r15 (the encoding is fixed).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
7833
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].imm;		/* lane number */
}
7840
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg << 16;	/* wRn */
  inst.instruction |= inst.operands[2].imm;		/* lane number */
}
7848
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* wRd */
  inst.instruction |= inst.operands[1].reg << 12;	/* Rn */
  inst.instruction |= inst.operands[2].imm;		/* lane number */
}
7856
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;	/* accumulator */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 12;	/* Rs */
}
7864
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* wRd */
  inst.instruction |= inst.operands[1].reg << 16;	/* wRn */
  inst.instruction |= inst.operands[2].reg;		/* wRm */
  inst.instruction |= inst.operands[3].imm << 20;	/* alignment */
}
7873
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* wRd */
  inst.instruction |= inst.operands[1].reg << 16;	/* wRn */
  inst.instruction |= inst.operands[2].reg;		/* wRm */
  inst.instruction |= inst.operands[3].imm << 21;	/* merge count */
}
7882
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN: the source register
     is encoded in both the wRn and wRm fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
7891
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword forms use a scaled offset
   relocation, chosen by the current (ARM/Thumb) mode.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
7903
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
7917
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset addressing form which must be
     encoded by hand rather than via encode_arm_cp_address.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);	/* unconditional */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;	/* base Rn */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;		/* index reg */
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
7940
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* wRd */
  inst.instruction |= inst.operands[1].reg << 16;	/* wRn */
  /* The 8-bit shuffle value splits: high nibble to bits 20-23,
     low nibble to bits 0-3.  */
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
7949
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg: the register is
     placed in the wRm (0-3), wRd (12-15) and wRn (16-19) fields.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
7958
/* iWMMXt shift instructions taking either a register or (on iWMMXt2)
   a 5-bit immediate shift count.  A count of zero is rewritten as an
   equivalent full-width rotate (or a WOR for the doubleword forms),
   since zero is not directly encodable.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation/width; remap by width.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; count bit 4 goes to bit 8.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
8008 \f
8009 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8010 operations first, then control, shift, and load/store. */
8011
8012 /* Insns like "foo X,Y,Z". */
8013
static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;	/* destination */
  inst.instruction |= inst.operands[1].reg;		/* first source */
  inst.instruction |= inst.operands[2].reg << 12;	/* second source */
}
8021
8022 /* Insns like "foo W,X,Y,Z".
8023 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8024
static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;	/* MVAX accumulator */
  inst.instruction |= inst.operands[1].reg << 12;	/* X */
  inst.instruction |= inst.operands[2].reg << 16;	/* Y */
  inst.instruction |= inst.operands[3].reg;		/* Z */
}
8033
8034 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register is
   encoded; DSPSC is implicit.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
8040
8041 /* Maverick shift immediate instructions.
8042 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8043 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8044
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
8060 \f
8061 /* XScale instructions. Also sorted arithmetic before move. */
8062
8063 /* Xscale multiply-accumulate (argument parse)
8064 MIAcc acc0,Rm,Rs
8065 MIAPHcc acc0,Rm,Rs
8066 MIAxycc acc0,Rm,Rs. */
8067
static void
do_xsc_mia (void)
{
  /* The acc0 operand (operand 0) is implicit in the encoding.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm */
  inst.instruction |= inst.operands[2].reg << 12;	/* Rs */
}
8074
8075 /* Xscale move-accumulator-register (argument parse)
8076
8077 MARcc acc0,RdLo,RdHi. */
8078
static void
do_xsc_mar (void)
{
  /* The acc0 operand (operand 0) is implicit in the encoding.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* RdLo */
  inst.instruction |= inst.operands[2].reg << 16;	/* RdHi */
}
8085
8086 /* Xscale move-register-accumulator (argument parse)
8087
8088 MRAcc RdLo,RdHi,acc0. */
8089
static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must be distinct; acc0 is implicit.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi */
}
8097 \f
8098 /* Encoding functions relevant only to Thumb. */
8099
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  /* For immediate shifts, the amount was parsed into the reloc
     expression by the operand parser.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb-32 data processing cannot shift by a register amount.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* 32 is representable only for LSR and ASR (as amount 0).  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	/* A zero amount is always encoded as LSL #0.  */
	shift = SHIFT_LSL;
      else if (value == 32)
	/* LSR #32 / ASR #32 encode with a zero immediate.  */
	value = 0;

      inst.instruction |= shift << 4;
      /* The amount is split: bits 2-4 into imm3 (insn bits 12-14),
	 bits 0-1 into imm2 (insn bits 6-7).  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8134
8135
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  Only a plain
	 pre-indexed access with an optional LSL of 0-3 is legal.  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* Everything is encoded directly; no fixup needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn {, #imm}]{!}.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  /* D instructions: set the U bit; writeback uses bit 21.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single loads/stores: P/U bits; writeback uses bit 8.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8212
8213 /* Table of Thumb instructions which exist in both 16- and 32-bit
8214 encodings (the latter only in post-V6T2 cores). The index is the
8215 value used in the insns table below. When there is more than one
8216 possible 16-bit encoding for the instruction, this table always
8217 holds variant (1).
8218 Also contains several pseudo-instructions used during relaxation. */
/* Each X() entry pairs a mnemonic with its 16-bit and 32-bit Thumb
   encodings; 0xffffffff marks a missing 32-bit form.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004: ARM ARM gives F3AF 8004 */
8297
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit encodings, indexed by T_MNEM_* code minus the offset.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit encodings, indexed the same way.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Bit 20 of the 32-bit encoding is the S (set-flags) bit.  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
8316
8317 /* Thumb instruction encoders, in alphabetical order. */
8318
8319 /* ADDW or SUBW. */
8320 static void
8321 do_t_add_sub_w (void)
8322 {
8323 int Rd, Rn;
8324
8325 Rd = inst.operands[0].reg;
8326 Rn = inst.operands[1].reg;
8327
8328 constraint (Rd == 15, _("PC not allowed as destination"));
8329 inst.instruction |= (Rn << 16) | (Rd << 8);
8330 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8331 }
8332
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block, only the non-flag-setting 16-bit forms
	 exist; outside one, the 16-bit forms always set flags.  */
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    /* Permit relaxation to the 32-bit form.  */
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Emit a 32-bit encoding.  */
	      if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
		    | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	    }
	}
      else
	{
	  /* Register operand.  */
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Classic (non-unified) syntax: only 16-bit encodings exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
8510
/* Encode Thumb ADR: load a PC-relative address into a register.  */
static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}
8540
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  /* The 16-bit form needs low registers, no shift, and no
	     explicit request for a 32-bit encoding.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
8624
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  /* The 16-bit form needs low registers, no shift, and no
	     explicit request for a 32-bit encoding.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutativity: either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
8720
8721 static void
8722 do_t_barrier (void)
8723 {
8724 if (inst.operands[0].present)
8725 {
8726 constraint ((inst.instruction & 0xf0) != 0x40
8727 && inst.operands[0].imm != 0xf,
8728 "bad barrier type");
8729 inst.instruction |= inst.operands[0].imm;
8730 }
8731 else
8732 inst.instruction |= 0xf;
8733 }
8734
8735 static void
8736 do_t_bfc (void)
8737 {
8738 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8739 constraint (msb > 32, _("bit-field extends past end of register"));
8740 /* The instruction encoding stores the LSB and MSB,
8741 not the LSB and width. */
8742 inst.instruction |= inst.operands[0].reg << 8;
8743 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8744 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8745 inst.instruction |= msb - 1;
8746 }
8747
8748 static void
8749 do_t_bfi (void)
8750 {
8751 unsigned int msb;
8752
8753 /* #0 in second position is alternative syntax for bfc, which is
8754 the same instruction but with REG_PC in the Rm field. */
8755 if (!inst.operands[1].isreg)
8756 inst.operands[1].reg = REG_PC;
8757
8758 msb = inst.operands[2].imm + inst.operands[3].imm;
8759 constraint (msb > 32, _("bit-field extends past end of register"));
8760 /* The instruction encoding stores the LSB and MSB,
8761 not the LSB and width. */
8762 inst.instruction |= inst.operands[0].reg << 8;
8763 inst.instruction |= inst.operands[1].reg << 16;
8764 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8765 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8766 inst.instruction |= msb - 1;
8767 }
8768
8769 static void
8770 do_t_bfx (void)
8771 {
8772 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8773 _("bit-field extends past end of register"));
8774 inst.instruction |= inst.operands[0].reg << 8;
8775 inst.instruction |= inst.operands[1].reg << 16;
8776 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8777 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8778 inst.instruction |= inst.operands[3].imm - 1;
8779 }
8780
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  /* Must be outside or last in an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      /* From EABI v4 on, the linker handles the Thumb/ARM state
	 change, so a plain branch reloc suffices.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}
8811
/* Encode a Thumb branch, choosing between the conditional and
   unconditional forms and between 16- and 32-bit encodings.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* 0xF is not a valid condition for a conditional branch.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
8863
/* Encode BKPT with its optional 8-bit immediate.  */
static void
do_t_bkpt (void)
{
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
}
8876
/* Encode BL (and pre-V5 BLX targets) with a 23-bit branch reloc.  */
static void
do_t_branch23 (void)
{
  /* Must be outside or last in an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
8895
/* Encode BX <Rm>.  */
static void
do_t_bx (void)
{
  /* Must be outside or last in an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.	 This is
     because BX PC only works if the instruction is word aligned.  */
}
8905
8906 static void
8907 do_t_bxj (void)
8908 {
8909 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8910 if (inst.operands[0].reg == REG_PC)
8911 as_tsktsk (_("use of r15 in bxj is not really useful"));
8912
8913 inst.instruction |= inst.operands[0].reg << 16;
8914 }
8915
/* Encode Thumb-32 CLZ.  */
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  /* The source register is deliberately encoded twice: once in the
     Rn field (bits 16-19) and once in the Rm field (bits 0-3).  */
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
8923
/* Encode CPS; not permitted inside an IT block.  */
static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}
8930
/* Encode CPSIE/CPSID, with an optional mode operand that forces the
   32-bit encoding.  */
static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Build the 32-bit form from scratch, carrying over the
	 interrupt-disable/enable bits parsed into the 16-bit opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): testing arm_ext_v1 to gate the 'A' flag looks
	 suspicious — confirm the intended feature test.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
8958
8959 /* THUMB CPY instruction (argument parse). */
8960
8961 static void
8962 do_t_cpy (void)
8963 {
8964 if (inst.size_req == 4)
8965 {
8966 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8967 inst.instruction |= inst.operands[0].reg << 8;
8968 inst.instruction |= inst.operands[1].reg;
8969 }
8970 else
8971 {
8972 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8973 inst.instruction |= (inst.operands[0].reg & 0x7);
8974 inst.instruction |= inst.operands[1].reg << 3;
8975 }
8976 }
8977
/* Encode CBZ/CBNZ: low registers only, never inside an IT block.  */
static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
8987
/* Encode the DBG hint; the option value is OR'd into the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8993
8994 static void
8995 do_t_div (void)
8996 {
8997 if (!inst.operands[1].present)
8998 inst.operands[1].reg = inst.operands[0].reg;
8999 inst.instruction |= inst.operands[0].reg << 8;
9000 inst.instruction |= inst.operands[1].reg << 16;
9001 inst.instruction |= inst.operands[2].reg;
9002 }
9003
9004 static void
9005 do_t_hint (void)
9006 {
9007 if (unified_syntax && inst.size_req == 4)
9008 inst.instruction = THUMB_OP32 (inst.instruction);
9009 else
9010 inst.instruction = THUMB_OP16 (inst.instruction);
9011 }
9012
/* Encode the IT instruction and record its state for checking the
   instructions that follow.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  /* IT blocks cannot nest.  */
  constraint (current_it_mask, BAD_NOT_IT);
  /* Remember the mask (with a sentinel bit above it) and condition.  */
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* Flip the then/else bits above the lowest set (stop) bit,
	 since their meaning is relative to cond bit 0.  */
      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
9042
/* Encode Thumb LDM/STM (and their db variants), choosing between 16-
   and 32-bit forms and diagnosing unpredictable register lists.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && inst.operands[0].reg <= 7
	  && !(inst.operands[1].imm & ~0xff)
	  /* stmia always writes back; ldmia writes back exactly when
	     the base is not also loaded.  */
	  && (inst.instruction == T_MNEM_stmia
	      ? inst.operands[0].writeback
	      : (inst.operands[0].writeback
		 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
	{
	  /* Storing the base is only predictable when it is the
	     lowest-numbered register in the list.  */
	  if (inst.instruction == T_MNEM_stmia
	      && (inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);

	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].imm;
	}
      else
	{
	  /* 32-bit form; warn about unpredictable register lists.  */
	  if (inst.operands[1].imm & (1 << 13))
	    as_warn (_("SP should not be in register list"));
	  if (inst.instruction == T_MNEM_stmia)
	    {
	      if (inst.operands[1].imm & (1 << 15))
		as_warn (_("PC should not be in register list"));
	      if (inst.operands[1].imm & (1 << inst.operands[0].reg))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);
	    }
	  else
	    {
	      if (inst.operands[1].imm & (1 << 14)
		  && inst.operands[1].imm & (1 << 15))
		as_warn (_("LR and PC should not both be in register list"));
	      if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
		  && inst.operands[0].writeback)
		as_warn (_("base register should not be in register list "
			   "when written back"));
	    }
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 16;
	  inst.instruction |= inst.operands[1].imm;
	  if (inst.operands[0].writeback)
	    inst.instruction |= WRITE_BACK;
	}
    }
  else
    {
      /* Classic syntax: only the 16-bit forms, with their implicit
	 writeback rules.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
9132
9133 static void
9134 do_t_ldrex (void)
9135 {
9136 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9137 || inst.operands[1].postind || inst.operands[1].writeback
9138 || inst.operands[1].immisreg || inst.operands[1].shifted
9139 || inst.operands[1].negative,
9140 BAD_ADDR_MODE);
9141
9142 inst.instruction |= inst.operands[0].reg << 12;
9143 inst.instruction |= inst.operands[1].reg << 16;
9144 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9145 }
9146
9147 static void
9148 do_t_ldrexd (void)
9149 {
9150 if (!inst.operands[1].present)
9151 {
9152 constraint (inst.operands[0].reg == REG_LR,
9153 _("r14 not allowed as first register "
9154 "when second register is omitted"));
9155 inst.operands[1].reg = inst.operands[0].reg + 1;
9156 }
9157 constraint (inst.operands[0].reg == inst.operands[1].reg,
9158 BAD_OVERLAP);
9159
9160 inst.instruction |= inst.operands[0].reg << 12;
9161 inst.instruction |= inst.operands[1].reg << 8;
9162 inst.instruction |= inst.operands[2].reg << 16;
9163 }
9164
/* Encode a Thumb load/store (LDR/STR and the byte/halfword/signed
   variants).  Under unified syntax a 16-bit encoding is chosen when
   the operands permit, possibly leaving a relaxation record so the
   instruction can be widened to 32 bits later; otherwise the 32-bit
   Thumb-2 form is emitted.  Non-unified (classic Thumb-1) syntax only
   ever produces 16-bit encodings.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal operand: try to satisfy it with a literal-pool
	     load (or an equivalent mov); if that succeeds we're done.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Ri] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* SP- and PC-relative accesses use dedicated 16-bit
		     opcodes with Rd in bits 8-10.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size requested: record the opcode so
		   relaxation can widen to 32 bits if the offset does
		   not fit the narrow encoding.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  /* Non-unified syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: only word-sized loads/stores have 16-bit
	 encodings, and PC-relative stores do not exist.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map each immediate-offset opcode to its register-offset twin;
     ldrsb/ldrsh only exist in register-offset form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
9319
9320 static void
9321 do_t_ldstd (void)
9322 {
9323 if (!inst.operands[1].present)
9324 {
9325 inst.operands[1].reg = inst.operands[0].reg + 1;
9326 constraint (inst.operands[0].reg == REG_LR,
9327 _("r14 not allowed here"));
9328 }
9329 inst.instruction |= inst.operands[0].reg << 12;
9330 inst.instruction |= inst.operands[1].reg << 8;
9331 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9332
9333 }
9334
/* Encode the unprivileged (T-variant) Thumb-2 loads/stores
   (LDRT/STRT etc.): Rt goes in bits 12-15 and the address is filled
   in by the generic Thumb-2 address encoder with is_t set.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
9341
9342 static void
9343 do_t_mla (void)
9344 {
9345 inst.instruction |= inst.operands[0].reg << 8;
9346 inst.instruction |= inst.operands[1].reg << 16;
9347 inst.instruction |= inst.operands[2].reg;
9348 inst.instruction |= inst.operands[3].reg << 12;
9349 }
9350
9351 static void
9352 do_t_mlal (void)
9353 {
9354 inst.instruction |= inst.operands[0].reg << 12;
9355 inst.instruction |= inst.operands[1].reg << 8;
9356 inst.instruction |= inst.operands[2].reg << 16;
9357 inst.instruction |= inst.operands[3].reg;
9358 }
9359
/* Encode Thumb MOV/MOVS/CMP with a register or immediate second
   operand, choosing between 16- and 32-bit encodings.  */
static void
do_t_mov_cmp (void)
{
  if (unified_syntax)
    {
      /* MOV/MOVS place Rd in bits 8-11; CMP places Rn in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      /* Narrow MOVS sets flags, so inside an IT block only non-MOVS
	 forms may be narrow; outside one MOVS may be narrow too, and
	 plain MOV only with low registers.  */
      if (current_it_mask)
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (current_it_mask == 0 && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= inst.operands[0].reg << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* No explicit size: record opcode for relaxation to
		   the wide form if the immediate does not fit.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= inst.operands[0].reg << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << r0off;
	  encode_thumb32_shifted_operand (1);
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* High-register MOV form: Rd split across bit 7 and
	       bits 0-2, Rm in bits 3-6.  */
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
	    inst.instruction |= (inst.operands[0].reg & 0x7);
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= inst.operands[0].reg;
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= inst.operands[0].reg;
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
		inst.instruction |= (inst.operands[0].reg & 0x7);
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);
  if (inst.operands[1].isreg)
    {
      if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
9478
9479 static void
9480 do_t_mov16 (void)
9481 {
9482 bfd_vma imm;
9483 bfd_boolean top;
9484
9485 top = (inst.instruction & 0x00800000) != 0;
9486 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9487 {
9488 constraint (top, _(":lower16: not allowed this instruction"));
9489 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9490 }
9491 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9492 {
9493 constraint (!top, _(":upper16: not allowed this instruction"));
9494 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9495 }
9496
9497 inst.instruction |= inst.operands[0].reg << 8;
9498 if (inst.reloc.type == BFD_RELOC_UNUSED)
9499 {
9500 imm = inst.reloc.exp.X_add_number;
9501 inst.instruction |= (imm & 0xf000) << 4;
9502 inst.instruction |= (imm & 0x0800) << 15;
9503 inst.instruction |= (imm & 0x0700) << 4;
9504 inst.instruction |= (imm & 0x00ff);
9505 }
9506 }
9507
/* Encode MVN/MVNS/TST/CMN-style instructions (one source register or
   immediate), choosing narrow or wide encodings.  */
static void
do_t_mvn_tst (void)
{
  if (unified_syntax)
    {
      /* MVN/MVNS place Rd in bits 8-11; TST/CMN place Rn in 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	/* Flag-setting narrow forms only outside an IT block;
	   non-flag-setting ones only inside.  */
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= inst.operands[0].reg << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: narrow, unshifted, low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
9574
9575 static void
9576 do_t_mrs (void)
9577 {
9578 int flags;
9579
9580 if (do_vfp_nsyn_mrs () == SUCCESS)
9581 return;
9582
9583 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9584 if (flags == 0)
9585 {
9586 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9587 _("selected processor does not support "
9588 "requested special purpose register"));
9589 }
9590 else
9591 {
9592 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9593 _("selected processor does not support "
9594 "requested special purpose register %x"));
9595 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9596 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9597 _("'CPSR' or 'SPSR' expected"));
9598 }
9599
9600 inst.instruction |= inst.operands[0].reg << 8;
9601 inst.instruction |= (flags & SPSR_BIT) >> 2;
9602 inst.instruction |= inst.operands[1].imm & 0xff;
9603 }
9604
9605 static void
9606 do_t_msr (void)
9607 {
9608 int flags;
9609
9610 if (do_vfp_nsyn_msr () == SUCCESS)
9611 return;
9612
9613 constraint (!inst.operands[1].isreg,
9614 _("Thumb encoding does not support an immediate here"));
9615 flags = inst.operands[0].imm;
9616 if (flags & ~0xff)
9617 {
9618 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9619 _("selected processor does not support "
9620 "requested special purpose register"));
9621 }
9622 else
9623 {
9624 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9625 _("selected processor does not support "
9626 "requested special purpose register"));
9627 flags |= PSR_f;
9628 }
9629 inst.instruction |= (flags & SPSR_BIT) >> 2;
9630 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9631 inst.instruction |= (flags & 0xff);
9632 inst.instruction |= inst.operands[1].reg << 16;
9633 }
9634
9635 static void
9636 do_t_mul (void)
9637 {
9638 if (!inst.operands[2].present)
9639 inst.operands[2].reg = inst.operands[0].reg;
9640
9641 /* There is no 32-bit MULS and no 16-bit MUL. */
9642 if (unified_syntax && inst.instruction == T_MNEM_mul)
9643 {
9644 inst.instruction = THUMB_OP32 (inst.instruction);
9645 inst.instruction |= inst.operands[0].reg << 8;
9646 inst.instruction |= inst.operands[1].reg << 16;
9647 inst.instruction |= inst.operands[2].reg << 0;
9648 }
9649 else
9650 {
9651 constraint (!unified_syntax
9652 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9653 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9654 BAD_HIREG);
9655
9656 inst.instruction = THUMB_OP16 (inst.instruction);
9657 inst.instruction |= inst.operands[0].reg;
9658
9659 if (inst.operands[0].reg == inst.operands[1].reg)
9660 inst.instruction |= inst.operands[2].reg << 3;
9661 else if (inst.operands[0].reg == inst.operands[2].reg)
9662 inst.instruction |= inst.operands[1].reg << 3;
9663 else
9664 constraint (1, _("dest must overlap one source register"));
9665 }
9666 }
9667
9668 static void
9669 do_t_mull (void)
9670 {
9671 inst.instruction |= inst.operands[0].reg << 12;
9672 inst.instruction |= inst.operands[1].reg << 8;
9673 inst.instruction |= inst.operands[2].reg << 16;
9674 inst.instruction |= inst.operands[3].reg;
9675
9676 if (inst.operands[0].reg == inst.operands[1].reg)
9677 as_tsktsk (_("rdhi and rdlo must be different"));
9678 }
9679
9680 static void
9681 do_t_nop (void)
9682 {
9683 if (unified_syntax)
9684 {
9685 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9686 {
9687 inst.instruction = THUMB_OP32 (inst.instruction);
9688 inst.instruction |= inst.operands[0].imm;
9689 }
9690 else
9691 {
9692 inst.instruction = THUMB_OP16 (inst.instruction);
9693 inst.instruction |= inst.operands[0].imm << 4;
9694 }
9695 }
9696 else
9697 {
9698 constraint (inst.operands[0].present,
9699 _("Thumb does not support NOP with hints"));
9700 inst.instruction = 0x46c0;
9701 }
9702 }
9703
9704 static void
9705 do_t_neg (void)
9706 {
9707 if (unified_syntax)
9708 {
9709 bfd_boolean narrow;
9710
9711 if (THUMB_SETS_FLAGS (inst.instruction))
9712 narrow = (current_it_mask == 0);
9713 else
9714 narrow = (current_it_mask != 0);
9715 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9716 narrow = FALSE;
9717 if (inst.size_req == 4)
9718 narrow = FALSE;
9719
9720 if (!narrow)
9721 {
9722 inst.instruction = THUMB_OP32 (inst.instruction);
9723 inst.instruction |= inst.operands[0].reg << 8;
9724 inst.instruction |= inst.operands[1].reg << 16;
9725 }
9726 else
9727 {
9728 inst.instruction = THUMB_OP16 (inst.instruction);
9729 inst.instruction |= inst.operands[0].reg;
9730 inst.instruction |= inst.operands[1].reg << 3;
9731 }
9732 }
9733 else
9734 {
9735 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9736 BAD_HIREG);
9737 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9738
9739 inst.instruction = THUMB_OP16 (inst.instruction);
9740 inst.instruction |= inst.operands[0].reg;
9741 inst.instruction |= inst.operands[1].reg << 3;
9742 }
9743 }
9744
9745 static void
9746 do_t_pkhbt (void)
9747 {
9748 inst.instruction |= inst.operands[0].reg << 8;
9749 inst.instruction |= inst.operands[1].reg << 16;
9750 inst.instruction |= inst.operands[2].reg;
9751 if (inst.operands[3].present)
9752 {
9753 unsigned int val = inst.reloc.exp.X_add_number;
9754 constraint (inst.reloc.exp.X_op != O_constant,
9755 _("expression too complex"));
9756 inst.instruction |= (val & 0x1c) << 10;
9757 inst.instruction |= (val & 0x03) << 6;
9758 }
9759 }
9760
/* Encode Thumb-2 PKHTB.  Without a shift, PKHTB Rd, Rn, Rm is the
   same operation as PKHBT: clear the tb bit (0x20) and share the
   PKHBT encoder.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    inst.instruction &= ~0x00000020;
  do_t_pkhbt ();
}
9768
/* Encode Thumb-2 PLD (preload hint): only the address operand needs
   filling in; there is no destination register.  */
static void
do_t_pld (void)
{
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
9774
/* Encode PUSH/POP.  Chooses between the 16-bit form (low registers,
   optionally plus LR for push / PC for pop), a single-register
   STR/LDR with SP writeback, and the 32-bit STM/LDM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the extra-register bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      mask &= 0xff;
    }
  else if (unified_syntax)
    {
      /* Wide forms; record any list errors but keep encoding.  */
      if (mask & (1 << 13))
	inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
	{
	  if (mask & (1 << 15))
	    inst.error = _("PC not allowed in register list");
	}
      else
	{
	  if (mask & (1 << 14)
	      && mask & (1 << 15))
	    inst.error = _("LR and PC should not both be in register list");
	}
      if ((mask & (mask - 1)) == 0)
	{
	  /* Single register push/pop implemented as str/ldr.  */
	  if (inst.instruction == T_MNEM_push)
	    inst.instruction = 0xf84d0d04;	/* str reg, [sp, #-4]! */
	  else
	    inst.instruction = 0xf85d0b04;	/* ldr reg, [sp], #4 */
	  /* Reuse MASK to carry the register number into bits 12-15.  */
	  mask = ffs(mask) - 1;
	  mask <<= 12;
	}
      else
	inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }

  inst.instruction |= mask;
}
9833
9834 static void
9835 do_t_rbit (void)
9836 {
9837 inst.instruction |= inst.operands[0].reg << 8;
9838 inst.instruction |= inst.operands[1].reg << 16;
9839 }
9840
9841 static void
9842 do_t_rev (void)
9843 {
9844 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9845 && inst.size_req != 4)
9846 {
9847 inst.instruction = THUMB_OP16 (inst.instruction);
9848 inst.instruction |= inst.operands[0].reg;
9849 inst.instruction |= inst.operands[1].reg << 3;
9850 }
9851 else if (unified_syntax)
9852 {
9853 inst.instruction = THUMB_OP32 (inst.instruction);
9854 inst.instruction |= inst.operands[0].reg << 8;
9855 inst.instruction |= inst.operands[1].reg << 16;
9856 inst.instruction |= inst.operands[1].reg;
9857 }
9858 else
9859 inst.error = BAD_HIREG;
9860 }
9861
9862 static void
9863 do_t_rsb (void)
9864 {
9865 int Rd, Rs;
9866
9867 Rd = inst.operands[0].reg;
9868 Rs = (inst.operands[1].present
9869 ? inst.operands[1].reg /* Rd, Rs, foo */
9870 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9871
9872 inst.instruction |= Rd << 8;
9873 inst.instruction |= Rs << 16;
9874 if (!inst.operands[2].isreg)
9875 {
9876 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9877 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9878 }
9879 else
9880 encode_thumb32_shifted_operand (2);
9881 }
9882
9883 static void
9884 do_t_setend (void)
9885 {
9886 constraint (current_it_mask, BAD_NOT_IT);
9887 if (inst.operands[0].imm)
9888 inst.instruction |= 0x8;
9889 }
9890
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, with a register
   or immediate shift count), choosing narrow or wide encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form shifts the destination in place.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow forms set flags, so they are only usable outside an
	 IT block; non-flag-setting forms only inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no narrow ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* Narrow register-shift needs Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Immediate shift: encoded as a wide MOV/MOVS with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
10022
10023 static void
10024 do_t_simd (void)
10025 {
10026 inst.instruction |= inst.operands[0].reg << 8;
10027 inst.instruction |= inst.operands[1].reg << 16;
10028 inst.instruction |= inst.operands[2].reg;
10029 }
10030
10031 static void
10032 do_t_smc (void)
10033 {
10034 unsigned int value = inst.reloc.exp.X_add_number;
10035 constraint (inst.reloc.exp.X_op != O_constant,
10036 _("expression too complex"));
10037 inst.reloc.type = BFD_RELOC_UNUSED;
10038 inst.instruction |= (value & 0xf000) >> 12;
10039 inst.instruction |= (value & 0x0ff0);
10040 inst.instruction |= (value & 0x000f) << 16;
10041 }
10042
10043 static void
10044 do_t_ssat (void)
10045 {
10046 inst.instruction |= inst.operands[0].reg << 8;
10047 inst.instruction |= inst.operands[1].imm - 1;
10048 inst.instruction |= inst.operands[2].reg << 16;
10049
10050 if (inst.operands[3].present)
10051 {
10052 constraint (inst.reloc.exp.X_op != O_constant,
10053 _("expression too complex"));
10054
10055 if (inst.reloc.exp.X_add_number != 0)
10056 {
10057 if (inst.operands[3].shift_kind == SHIFT_ASR)
10058 inst.instruction |= 0x00200000; /* sh bit */
10059 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10060 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10061 }
10062 inst.reloc.type = BFD_RELOC_UNUSED;
10063 }
10064 }
10065
10066 static void
10067 do_t_ssat16 (void)
10068 {
10069 inst.instruction |= inst.operands[0].reg << 8;
10070 inst.instruction |= inst.operands[1].imm - 1;
10071 inst.instruction |= inst.operands[2].reg << 16;
10072 }
10073
10074 static void
10075 do_t_strex (void)
10076 {
10077 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10078 || inst.operands[2].postind || inst.operands[2].writeback
10079 || inst.operands[2].immisreg || inst.operands[2].shifted
10080 || inst.operands[2].negative,
10081 BAD_ADDR_MODE);
10082
10083 inst.instruction |= inst.operands[0].reg << 8;
10084 inst.instruction |= inst.operands[1].reg << 12;
10085 inst.instruction |= inst.operands[2].reg << 16;
10086 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10087 }
10088
10089 static void
10090 do_t_strexd (void)
10091 {
10092 if (!inst.operands[2].present)
10093 inst.operands[2].reg = inst.operands[1].reg + 1;
10094
10095 constraint (inst.operands[0].reg == inst.operands[1].reg
10096 || inst.operands[0].reg == inst.operands[2].reg
10097 || inst.operands[0].reg == inst.operands[3].reg
10098 || inst.operands[1].reg == inst.operands[2].reg,
10099 BAD_OVERLAP);
10100
10101 inst.instruction |= inst.operands[0].reg;
10102 inst.instruction |= inst.operands[1].reg << 12;
10103 inst.instruction |= inst.operands[2].reg << 8;
10104 inst.instruction |= inst.operands[3].reg << 16;
10105 }
10106
10107 static void
10108 do_t_sxtah (void)
10109 {
10110 inst.instruction |= inst.operands[0].reg << 8;
10111 inst.instruction |= inst.operands[1].reg << 16;
10112 inst.instruction |= inst.operands[2].reg;
10113 inst.instruction |= inst.operands[3].imm << 4;
10114 }
10115
10116 static void
10117 do_t_sxth (void)
10118 {
10119 if (inst.instruction <= 0xffff && inst.size_req != 4
10120 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10121 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10122 {
10123 inst.instruction = THUMB_OP16 (inst.instruction);
10124 inst.instruction |= inst.operands[0].reg;
10125 inst.instruction |= inst.operands[1].reg << 3;
10126 }
10127 else if (unified_syntax)
10128 {
10129 if (inst.instruction <= 0xffff)
10130 inst.instruction = THUMB_OP32 (inst.instruction);
10131 inst.instruction |= inst.operands[0].reg << 8;
10132 inst.instruction |= inst.operands[1].reg;
10133 inst.instruction |= inst.operands[2].imm << 4;
10134 }
10135 else
10136 {
10137 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10138 _("Thumb encoding does not support rotation"));
10139 constraint (1, BAD_HIREG);
10140 }
10141 }
10142
/* Encode Thumb SWI/SVC: the comment field is filled in later via the
   SWI relocation.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
10148
/* Encode Thumb-2 TBB/TBH (table branch).  */
static void
do_t_tb (void)
{
  int half;

  /* Bit 4 set means TBH (halfword table).  */
  half = (inst.instruction & 0x10) != 0;
  /* Only allowed standalone or as the last instruction of an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  /* TBH requires an LSL #1 on the index; TBB allows no shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  /* Base register in bits 16-19, index register in bits 0-3.  */
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}
10164
10165 static void
10166 do_t_usat (void)
10167 {
10168 inst.instruction |= inst.operands[0].reg << 8;
10169 inst.instruction |= inst.operands[1].imm;
10170 inst.instruction |= inst.operands[2].reg << 16;
10171
10172 if (inst.operands[3].present)
10173 {
10174 constraint (inst.reloc.exp.X_op != O_constant,
10175 _("expression too complex"));
10176 if (inst.reloc.exp.X_add_number != 0)
10177 {
10178 if (inst.operands[3].shift_kind == SHIFT_ASR)
10179 inst.instruction |= 0x00200000; /* sh bit */
10180
10181 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10182 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10183 }
10184 inst.reloc.type = BFD_RELOC_UNUSED;
10185 }
10186 }
10187
10188 static void
10189 do_t_usat16 (void)
10190 {
10191 inst.instruction |= inst.operands[0].reg << 8;
10192 inst.instruction |= inst.operands[1].imm;
10193 inst.instruction |= inst.operands[2].reg << 16;
10194 }
10195
10196 /* Neon instruction encoder helpers. */
10197
10198 /* Encodings for the different types for various Neon opcodes. */
10199
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative base encodings of an overloaded
   mnemonic.  Which member is meaningful depends on the mnemonic (see the
   NEON_ENC_* accessor macros below); N_INV marks a form that does not
   exist for that mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed. */  	\
  X(vclt,	0x0000310, 0x1000e00, 0x1b10200),	\
  X(vcle,	0x0000300, 0x1200e00, 0x1b10180),	\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe000a40, 0xe000b40, N_INV),		\
  X(vnmls,      0xe100a40, 0xe100b40, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)

/* Enumerators N_MNEM_vabd ... used as the base "instruction" value of an
   overloaded mnemonic until the real encoding is chosen.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encodings themselves, indexed by the enum above.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Accessors: select the appropriate column of neon_enc_tab for the
   N_MNEM_* value held in the low 28 bits of X.  Several accessors alias
   the same column; the name documents intent at the call site.  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE variants additionally preserve the top nibble of X
   (condition/prefix bits) in the result.  */
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10296
10297 /* Define shapes for instruction operands. The following mnemonic characters
10298 are used in this table:
10299
10300 F - VFP S<n> register
10301 D - Neon D<n> register
10302 Q - Neon Q<n> register
10303 I - Immediate
10304 S - Scalar
10305 R - ARM register
10306 L - D<n> register list
10307
10308 This table is used to generate various data:
10309 - enumerations of the form NS_DDR to be used as arguments to
10310 neon_select_shape.
10311 - a table classifying shapes into single, double, quad, mixed.
10312 - a table used to drive neon_select_shape.
10313 */
10314
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Paste an operand-letter list into a single NS_* identifier,
   e.g. (D, D, D) -> NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape (NS_DDD, NS_QQQ, ...), plus NS_NULL as the
   "no match / end of list" sentinel.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

/* Broad classification of each shape; see neon_quad.  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification table, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* The kinds of element a shape position can require; mirrors the mnemonic
   letters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F: VFP single.  */
  64,	/* SE_D: Neon D register.  */
  128,	/* SE_Q: Neon Q register.  */
  0,	/* SE_I: immediate has no register width.  */
  32,	/* SE_S: scalar.  */
  32,	/* SE_R: ARM core register.  */
  0	/* SE_L: register list.  */
};

/* A shape expanded into its element kinds: ELS valid entries in EL.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Expansion table, indexed by enum neon_shape; drives neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
10435
10436 /* Bit masks used in type checking given instructions.
10437 'N_EQK' means the type must be the same as (or based on in some way) the key
10438 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10439 set, various other bits can be set as well in order to modify the meaning of
10440 the type constraint. */
10441
enum neon_type_mask
{
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_F64  = 0x080000,
  N_KEY  = 0x100000, /* key element (main type specifier).  */
  N_EQK  = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP  = 0x400000, /* VFP mode: operand size must match register width.  */
  /* The bits below reuse the low bit positions; they are only meaningful
     when N_EQK is set, where no plain type bit can appear.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};

/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of the plain type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
10490
10491 /* Select a "shape" for the current instruction (describing register types or
10492 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10493 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10494 function of operand parsing, so this function doesn't need to be called.
10495 Shapes should be listed in order of decreasing length. */
10496
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches the parsed
     operands.  The argument list must be terminated with NS_NULL.  */
  for (; shape != NS_NULL; shape = va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check operand J against the element kind the shape requires;
	     see enum neon_shape_el for the meaning of each case.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Register lists are not checked here.  */
	    case SE_L:
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Report an error only if the caller supplied at least one shape.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
10584
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  Returns nonzero iff SHAPE is classified
   SC_QUAD in neon_shape_class.  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}
10593
/* Apply the modifier bits carried alongside N_EQK in TYPEBITS to the
   type/size pair pointed to by G_TYPE and G_SIZE: N_HLF/N_DBL halve or
   double the size, and at most one of N_SGN/N_UNS/N_INT/N_FLT/N_SIZ forces
   the element type.  Does nothing unless N_EQK is set.  */

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      /* N_HLF takes priority over N_DBL if both are (incorrectly) set.  */
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
10618
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  THISARG must have N_EQK set; the modifier bits
   accompanying it (N_DBL, N_SGN, ...) are applied to a copy of *KEY.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for operands tied to the key type.  */
  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
10634
10635 /* Convert Neon type and size into compact bitmask representation. */
10636
10637 static enum neon_type_mask
10638 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10639 {
10640 switch (type)
10641 {
10642 case NT_untyped:
10643 switch (size)
10644 {
10645 case 8: return N_8;
10646 case 16: return N_16;
10647 case 32: return N_32;
10648 case 64: return N_64;
10649 default: ;
10650 }
10651 break;
10652
10653 case NT_integer:
10654 switch (size)
10655 {
10656 case 8: return N_I8;
10657 case 16: return N_I16;
10658 case 32: return N_I32;
10659 case 64: return N_I64;
10660 default: ;
10661 }
10662 break;
10663
10664 case NT_float:
10665 switch (size)
10666 {
10667 case 32: return N_F32;
10668 case 64: return N_F64;
10669 default: ;
10670 }
10671 break;
10672
10673 case NT_poly:
10674 switch (size)
10675 {
10676 case 8: return N_P8;
10677 case 16: return N_P16;
10678 default: ;
10679 }
10680 break;
10681
10682 case NT_signed:
10683 switch (size)
10684 {
10685 case 8: return N_S8;
10686 case 16: return N_S16;
10687 case 32: return N_S32;
10688 case 64: return N_S64;
10689 default: ;
10690 }
10691 break;
10692
10693 case NT_unsigned:
10694 switch (size)
10695 {
10696 case 8: return N_U8;
10697 case 16: return N_U16;
10698 case 32: return N_U32;
10699 case 64: return N_U64;
10700 default: ;
10701 }
10702 break;
10703
10704 default: ;
10705 }
10706
10707 return N_UTYP;
10708 }
10709
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask; with several bits
   set the first matching group below wins.  Returns FAIL for N_EQK masks
   and for masks with no recognized size or type bit; SUCCESS otherwise,
   with *TYPE and *SIZE filled in.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK reuses the low bit positions, so it can't be decoded here.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element size from whichever width group the bit is in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Derive the element type from the bit's type group.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
10748
10749 /* Modify a bitmask of allowed types. This is only needed for type
10750 relaxation. */
10751
10752 static unsigned
10753 modify_types_allowed (unsigned allowed, unsigned mods)
10754 {
10755 unsigned size;
10756 enum neon_el_type type;
10757 unsigned destmask;
10758 int i;
10759
10760 destmask = 0;
10761
10762 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10763 {
10764 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10765 {
10766 neon_modify_type_size (mods, &type, &size);
10767 destmask |= type_chk_of_el_type (type, size);
10768 }
10769 }
10770
10771 return destmask;
10772 }
10773
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of type arguments; NS is the shape chosen by
   neon_select_shape (used only for N_VFP register-width checks).  The
   varargs are ELS neon_type_mask values, exactly one carrying N_KEY.
   Returns the key element's type, or {NT_invtype, -1} on error (after
   calling first_error).  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE short-circuits all checking.  */
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* Types may come from the mnemonic suffix or from per-operand suffixes,
     but not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* The single given type is the key; derive the others from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key element's type/size; pass 1 checks every
     element against the key (and against register widths for N_VFP).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
	  /* On pass 1, N_EQK arguments check against the key's allowed set
	     transformed by their modifier bits.  */
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
                  unsigned regwidth = neon_shape_el_size[regshape], match;

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand.  */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
		  /* An N_EQK operand must match the (modified) key
		     type/size exactly.  */
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
10952
10953 /* Neon-style VFP instruction forwarding. */
10954
10955 /* Thumb VFP instructions have 0xE in the condition field. */
10956
10957 static void
10958 do_vfp_cond_or_thumb (void)
10959 {
10960 if (thumb_mode)
10961 inst.instruction |= 0xe0000000;
10962 else
10963 inst.instruction |= inst.cond << 28;
10964 }
10965
10966 /* Look up and encode a simple mnemonic, for use as a helper function for the
10967 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10968 etc. It is assumed that operand parsing has already been done, and that the
10969 operands are in the form expected by the given opcode (this isn't necessarily
10970 the same as the form in which they were parsed, hence some massaging must
10971 take place before this function is called).
10972 Checks current arch version against that in the looked-up opcode. */
10973
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = hash_find (arm_ops_hsh, opname);

  /* Callers only pass mnemonics known to be in the opcode table.  */
  if (!opcode)
    abort ();

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  /* Replace inst.instruction with the looked-up opcode's base value and
     run its encoder for the current mode.  */
  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
10999
11000 static void
11001 do_vfp_nsyn_add_sub (enum neon_shape rs)
11002 {
11003 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11004
11005 if (rs == NS_FFF)
11006 {
11007 if (is_add)
11008 do_vfp_nsyn_opcode ("fadds");
11009 else
11010 do_vfp_nsyn_opcode ("fsubs");
11011 }
11012 else
11013 {
11014 if (is_add)
11015 do_vfp_nsyn_opcode ("faddd");
11016 else
11017 do_vfp_nsyn_opcode ("fsubd");
11018 }
11019 }
11020
11021 /* Check operand types to see if this is a VFP instruction, and if so call
11022 PFN (). */
11023
11024 static int
11025 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11026 {
11027 enum neon_shape rs;
11028 struct neon_type_el et;
11029
11030 switch (args)
11031 {
11032 case 2:
11033 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11034 et = neon_check_type (2, rs,
11035 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11036 break;
11037
11038 case 3:
11039 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11040 et = neon_check_type (3, rs,
11041 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11042 break;
11043
11044 default:
11045 abort ();
11046 }
11047
11048 if (et.type != NT_invtype)
11049 {
11050 pfn (rs);
11051 return SUCCESS;
11052 }
11053 else
11054 inst.error = NULL;
11055
11056 return FAIL;
11057 }
11058
11059 static void
11060 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11061 {
11062 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11063
11064 if (rs == NS_FFF)
11065 {
11066 if (is_mla)
11067 do_vfp_nsyn_opcode ("fmacs");
11068 else
11069 do_vfp_nsyn_opcode ("fmscs");
11070 }
11071 else
11072 {
11073 if (is_mla)
11074 do_vfp_nsyn_opcode ("fmacd");
11075 else
11076 do_vfp_nsyn_opcode ("fmscd");
11077 }
11078 }
11079
11080 static void
11081 do_vfp_nsyn_mul (enum neon_shape rs)
11082 {
11083 if (rs == NS_FFF)
11084 do_vfp_nsyn_opcode ("fmuls");
11085 else
11086 do_vfp_nsyn_opcode ("fmuld");
11087 }
11088
11089 static void
11090 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11091 {
11092 int is_neg = (inst.instruction & 0x80) != 0;
11093 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11094
11095 if (rs == NS_FF)
11096 {
11097 if (is_neg)
11098 do_vfp_nsyn_opcode ("fnegs");
11099 else
11100 do_vfp_nsyn_opcode ("fabss");
11101 }
11102 else
11103 {
11104 if (is_neg)
11105 do_vfp_nsyn_opcode ("fnegd");
11106 else
11107 do_vfp_nsyn_opcode ("fabsd");
11108 }
11109 }
11110
11111 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11112 insns belong to Neon, and are handled elsewhere. */
11113
11114 static void
11115 do_vfp_nsyn_ldm_stm (int is_dbmode)
11116 {
11117 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11118 if (is_ldm)
11119 {
11120 if (is_dbmode)
11121 do_vfp_nsyn_opcode ("fldmdbs");
11122 else
11123 do_vfp_nsyn_opcode ("fldmias");
11124 }
11125 else
11126 {
11127 if (is_dbmode)
11128 do_vfp_nsyn_opcode ("fstmdbs");
11129 else
11130 do_vfp_nsyn_opcode ("fstmias");
11131 }
11132 }
11133
11134 static void
11135 do_vfp_nsyn_sqrt (void)
11136 {
11137 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11138 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11139
11140 if (rs == NS_FF)
11141 do_vfp_nsyn_opcode ("fsqrts");
11142 else
11143 do_vfp_nsyn_opcode ("fsqrtd");
11144 }
11145
11146 static void
11147 do_vfp_nsyn_div (void)
11148 {
11149 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11150 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11151 N_F32 | N_F64 | N_KEY | N_VFP);
11152
11153 if (rs == NS_FFF)
11154 do_vfp_nsyn_opcode ("fdivs");
11155 else
11156 do_vfp_nsyn_opcode ("fdivd");
11157 }
11158
11159 static void
11160 do_vfp_nsyn_nmul (void)
11161 {
11162 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11163 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11164 N_F32 | N_F64 | N_KEY | N_VFP);
11165
11166 if (rs == NS_FFF)
11167 {
11168 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11169 do_vfp_sp_dyadic ();
11170 }
11171 else
11172 {
11173 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11174 do_vfp_dp_rd_rn_rm ();
11175 }
11176 do_vfp_cond_or_thumb ();
11177 }
11178
static void
do_vfp_nsyn_cmp (void)
{
  /* Register-register compare.  */
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_monadic ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      /* Compare with zero (second operand is the immediate #0).  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Rewrite the mnemonic index to the compare-with-zero variant so the
	 table lookups below pick the right encoding.  */
      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_compare_z ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}
11228
/* Shift the parsed register-list operand to slot 1 and synthesize "sp!"
   as operand 0, so vpush/vpop can reuse the fstmdb/fldmdb encoders.  */
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;	/* SP.  */
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
11239
11240 static void
11241 do_vfp_nsyn_push (void)
11242 {
11243 nsyn_insert_sp ();
11244 if (inst.operands[1].issingle)
11245 do_vfp_nsyn_opcode ("fstmdbs");
11246 else
11247 do_vfp_nsyn_opcode ("fstmdbd");
11248 }
11249
11250 static void
11251 do_vfp_nsyn_pop (void)
11252 {
11253 nsyn_insert_sp ();
11254 if (inst.operands[1].issingle)
11255 do_vfp_nsyn_opcode ("fldmdbs");
11256 else
11257 do_vfp_nsyn_opcode ("fldmdbd");
11258 }
11259
11260 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11261 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11262
11263 static unsigned
11264 neon_dp_fixup (unsigned i)
11265 {
11266 if (thumb_mode)
11267 {
11268 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11269 if (i & (1 << 24))
11270 i |= 1 << 28;
11271
11272 i &= ~(1 << 24);
11273
11274 i |= 0xef000000;
11275 }
11276 else
11277 i |= 0xf2000000;
11278
11279 return i;
11280 }
11281
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  I.e. the zero-based index of the (single) set bit,
   minus 3.  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned lowbit = 0;

  if (x == 0)
    return (unsigned) -4;	/* Matches ffs (0) - 4.  */

  while ((x & 1) == 0)
    {
      x >>= 1;
      lowbit++;
    }
  return lowbit - 3;
}
11290
#define LOW4(R) ((R) & 0xf)		/* Low four bits of a register number.  */
#define HI1(R) (((R) >> 4) & 1)	/* Fifth bit: the D/N/M extension bit.  */
11293
11294 /* Encode insns with bit pattern:
11295
11296 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11297 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11298
11299 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11300 different meaning for some instruction. */
11301
11302 static void
11303 neon_three_same (int isquad, int ubit, int size)
11304 {
11305 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11306 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11307 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11308 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11309 inst.instruction |= LOW4 (inst.operands[2].reg);
11310 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11311 inst.instruction |= (isquad != 0) << 6;
11312 inst.instruction |= (ubit != 0) << 24;
11313 if (size != -1)
11314 inst.instruction |= neon_logbits (size) << 20;
11315
11316 inst.instruction = neon_dp_fixup (inst.instruction);
11317 }
11318
11319 /* Encode instructions of the form:
11320
11321 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11322 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11323
11324 Don't write size if SIZE == -1. */
11325
11326 static void
11327 neon_two_same (int qbit, int ubit, int size)
11328 {
11329 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11330 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11331 inst.instruction |= LOW4 (inst.operands[1].reg);
11332 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11333 inst.instruction |= (qbit != 0) << 6;
11334 inst.instruction |= (ubit != 0) << 24;
11335
11336 if (size != -1)
11337 inst.instruction |= neon_logbits (size) << 18;
11338
11339 inst.instruction = neon_dp_fixup (inst.instruction);
11340 }
11341
11342 /* Neon instruction encoders, in approximate order of appearance. */
11343
11344 static void
11345 do_neon_dyadic_i_su (void)
11346 {
11347 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11348 struct neon_type_el et = neon_check_type (3, rs,
11349 N_EQK, N_EQK, N_SU_32 | N_KEY);
11350 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11351 }
11352
11353 static void
11354 do_neon_dyadic_i64_su (void)
11355 {
11356 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11357 struct neon_type_el et = neon_check_type (3, rs,
11358 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11359 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11360 }
11361
11362 static void
11363 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11364 unsigned immbits)
11365 {
11366 unsigned size = et.size >> 3;
11367 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11368 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11369 inst.instruction |= LOW4 (inst.operands[1].reg);
11370 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11371 inst.instruction |= (isquad != 0) << 6;
11372 inst.instruction |= immbits << 16;
11373 inst.instruction |= (size >> 3) << 7;
11374 inst.instruction |= (size & 0x7) << 19;
11375 if (write_ubit)
11376 inst.instruction |= (uval != 0) << 24;
11377
11378 inst.instruction = neon_dp_fixup (inst.instruction);
11379 }
11380
11381 static void
11382 do_neon_shl_imm (void)
11383 {
11384 if (!inst.operands[2].isreg)
11385 {
11386 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11387 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11388 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11389 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11390 }
11391 else
11392 {
11393 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11394 struct neon_type_el et = neon_check_type (3, rs,
11395 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11396 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11397 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11398 }
11399 }
11400
11401 static void
11402 do_neon_qshl_imm (void)
11403 {
11404 if (!inst.operands[2].isreg)
11405 {
11406 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11407 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11408 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11409 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11410 inst.operands[2].imm);
11411 }
11412 else
11413 {
11414 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11415 struct neon_type_el et = neon_check_type (3, rs,
11416 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11417 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11418 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11419 }
11420 }
11421
/* Find the "cmode" encoding (and the corresponding 8-bit payload, written
   to *IMMBITS) for a VBIC/VORR-style logic immediate of element width
   SIZE.  Returns the cmode value, or FAIL (after reporting an error) if
   the immediate cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions. */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
         FIXME is this the intended semantics? There doesn't seem much point in
         accepting .I8 if so. */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: the immediate must be a single byte placed in one
         of the four byte lanes (cmode 1/3/5/7).  */
      if (immediate == (immediate & 0x000000ff))
        {
          *immbits = immediate;
          return 0x1;
        }
      else if (immediate == (immediate & 0x0000ff00))
        {
          *immbits = immediate >> 8;
          return 0x3;
        }
      else if (immediate == (immediate & 0x00ff0000))
        {
          *immbits = immediate >> 16;
          return 0x5;
        }
      else if (immediate == (immediate & 0xff000000))
        {
          *immbits = immediate >> 24;
          return 0x7;
        }
      /* A 32-bit value whose halfwords repeat can fall through and be
         encoded with the 16-bit cmodes instead.  */
      if ((immediate & 0xffff) != (immediate >> 16))
        goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: a single byte in either lane (cmode 9/b).  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
11477
11478 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11479 A, B, C, D. */
11480
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Return 1 iff every byte of IMM is either 0x00 or 0xff, i.e. IMM has
     the form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits A, B, C, D.  */
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned byte = (imm >> (i * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }
  return 1;
}
11489
11490 /* For immediate of above form, return 0bABCD. */
11491
static unsigned
neon_squash_bits (unsigned imm)
{
  /* For an immediate accepted by neon_bits_same_in_bytes, collapse each
     byte to the single bit at its bottom, producing 0bABCD.  */
  unsigned result = 0;
  int i;

  for (i = 0; i < 4; i++)
    if (imm & (1u << (i * 8)))
      result |= 1u << i;

  return result;
}
11498
11499 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11500
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Compress a quarter-float single-precision bit pattern to its 8-bit
     abcdefgh form: the sign bit lands at bit 7, the exponent/mantissa
     bits 25..19 at bits 6..0.  */
  unsigned expmant = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return expmant | sign;
}
11506
11507 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11508 the instruction. *OP is passed as the initial value of the op field, and
11509 may be set to a different value depending on the constant (i.e.
11510 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN). If the immediate looks like a repeated pattern then also
11512 try smaller element sizes. */
11513
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
                         int *op, int size, enum neon_el_type type)
{
  /* Quarter-float form (cmode 0xf): only valid for 32-bit float moves,
     and only for MOV (*op == 0), not MVN.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit byte-mask form (cmode 0xe with OP = 1): each byte of the
         value must be all-zeros or all-ones.  */
      if (neon_bits_same_in_bytes (immhi)
          && neon_bits_same_in_bytes (immlo))
        {
          /* *op must have been 0, since OP = 1 is claimed below.  */
          if (*op == 1)
            return FAIL;
          *immbits = (neon_squash_bits (immhi) << 4)
                     | neon_squash_bits (immlo);
          *op = 1;
          return 0xe;
        }

      /* Otherwise a 64-bit move only works if it is really a repeated
         32-bit pattern; fall through with the low half.  */
      if (immhi != immlo)
        return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit forms: a byte in any lane (cmode 0/2/4/6), or a byte
         shifted with ones filling below (cmode 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x0;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0x2;
        }
      else if (immlo == (immlo & 0x00ff0000))
        {
          *immbits = immlo >> 16;
          return 0x4;
        }
      else if (immlo == (immlo & 0xff000000))
        {
          *immbits = immlo >> 24;
          return 0x6;
        }
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
        {
          *immbits = (immlo >> 8) & 0xff;
          return 0xc;
        }
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
        {
          *immbits = (immlo >> 16) & 0xff;
          return 0xd;
        }

      /* A repeated halfword can try the smaller element sizes.  */
      if ((immlo & 0xffff) != (immlo >> 16))
        return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit forms: a byte in either lane (cmode 8/0xa).  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x8;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0xa;
        }

      /* A repeated byte can try the 8-bit form.  */
      if ((immlo & 0xff) != (immlo >> 8))
        return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate. */
      if (*op == 1)
        return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
11610
11611 /* Write immediate bits [7:0] to the following locations:
11612
11613 |28/24|23 19|18 16|15 4|3 0|
11614 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11615
11616 This function is used by VMOV/VMVN/VORR/VBIC. */
11617
11618 static void
11619 neon_write_immbits (unsigned immbits)
11620 {
11621 inst.instruction |= immbits & 0xf;
11622 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11623 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11624 }
11625
11626 /* Invert low-order SIZE bits of XHI:XLO. */
11627
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Invert the low-order SIZE bits of the 64-bit value XHI:XLO, leaving
     the higher-order bits zero.  Either pointer may be NULL, in which
     case that half is treated as zero on input and not written back.  */
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
11662
/* Bitwise logic ops: either the three-register form (element type is
   ignored), or the two-operand immediate form of VBIC/VORR, with
   VAND/VORN handled as pseudo-instructions of those using the inverted
   immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask. */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Mask off the condition field to recover the bare mnemonic code.  */
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern. */
          if (immbits != (inst.operands[1].regisimm ?
                          inst.operands[1].reg : 0))
            {
              /* Set immbits to an invalid constant. */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC. */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR. */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
        return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
11739
11740 static void
11741 do_neon_bitfield (void)
11742 {
11743 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11744 neon_check_type (3, rs, N_IGNORE_TYPE);
11745 neon_three_same (neon_quad (rs), 0, -1);
11746 }
11747
11748 static void
11749 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11750 unsigned destbits)
11751 {
11752 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11753 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11754 types | N_KEY);
11755 if (et.type == NT_float)
11756 {
11757 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11758 neon_three_same (neon_quad (rs), 0, -1);
11759 }
11760 else
11761 {
11762 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11763 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11764 }
11765 }
11766
/* Dyadic ops accepting S8/S16/S32, U8/U16/U32 or F32 element types;
   unsigned types set the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
11772
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
11780
/* Dyadic ops accepting I8/I16/I32 or F32 element types (D registers
   only, enforced at parse time).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args. */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
11788
/* Flag bits for vfp_or_neon_is_neon, below.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Validate/adjust the condition field.  */
  NEON_CHECK_ARCH = 2	/* Verify the target FPU supports Neon.  */
};
11794
11795 /* Call this function if an instruction which may have belonged to the VFP or
11796 Neon instruction sets, but turned out to be a Neon instruction (due to the
11797 operand types involved, etc.). We have to check and/or fix-up a couple of
11798 things:
11799
11800 - Make sure the user hasn't attempted to make a Neon instruction
11801 conditional.
11802 - Alter the value in the condition code field if necessary.
11803 - Make sure that the arch supports Neon instructions.
11804
11805 Which of these operations take place depends on bits from enum
11806 vfp_or_neon_is_neon_bits.
11807
11808 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11809 current instruction's condition is COND_ALWAYS, the condition field is
11810 changed to inst.uncond_value. This is necessary because instructions shared
11811 between VFP and Neon may be conditional for the VFP variants only, and the
11812 unconditional Neon version must have, e.g., 0xF in the condition field. */
11813
11814 static int
11815 vfp_or_neon_is_neon (unsigned check)
11816 {
11817 /* Conditions are always legal in Thumb mode (IT blocks). */
11818 if (!thumb_mode && (check & NEON_CHECK_CC))
11819 {
11820 if (inst.cond != COND_ALWAYS)
11821 {
11822 first_error (_(BAD_COND));
11823 return FAIL;
11824 }
11825 if (inst.uncond_value != -1)
11826 inst.instruction |= inst.uncond_value << 28;
11827 }
11828
11829 if ((check & NEON_CHECK_ARCH)
11830 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11831 {
11832 first_error (_(BAD_FPU));
11833 return FAIL;
11834 }
11835
11836 return SUCCESS;
11837 }
11838
/* Add/subtract: try the VFP-syntax encoding first; otherwise validate
   and encode as a Neon dyadic op.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args. */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
11852
11853 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11854 result to be:
11855 V<op> A,B (A is operand 0, B is operand 2)
11856 to mean:
11857 V<op> A,B,A
11858 not:
11859 V<op> A,B,B
11860 so handle that case specially. */
11861
11862 static void
11863 neon_exchange_operands (void)
11864 {
11865 void *scratch = alloca (sizeof (inst.operands[0]));
11866 if (inst.operands[1].present)
11867 {
11868 /* Swap operands[1] and operands[2]. */
11869 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11870 inst.operands[1] = inst.operands[2];
11871 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11872 }
11873 else
11874 {
11875 inst.operands[1] = inst.operands[2];
11876 inst.operands[2] = inst.operands[0];
11877 }
11878 }
11879
11880 static void
11881 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
11882 {
11883 if (inst.operands[2].isreg)
11884 {
11885 if (invert)
11886 neon_exchange_operands ();
11887 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
11888 }
11889 else
11890 {
11891 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11892 struct neon_type_el et = neon_check_type (2, rs,
11893 N_EQK | N_SIZ, immtypes | N_KEY);
11894
11895 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11896 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11897 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11898 inst.instruction |= LOW4 (inst.operands[1].reg);
11899 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11900 inst.instruction |= neon_quad (rs) << 6;
11901 inst.instruction |= (et.type == NT_float) << 10;
11902 inst.instruction |= neon_logbits (et.size) << 18;
11903
11904 inst.instruction = neon_dp_fixup (inst.instruction);
11905 }
11906 }
11907
/* Comparison; operands are used in the order written.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
11913
/* Comparison with the register operands exchanged, synthesizing the
   inverse condition from the same underlying opcode.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
11919
/* Equality comparison: integer or float element types, no operand swap
   needed since equality is symmetric.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
11925
11926 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11927 scalars, which are encoded in 5 bits, M : Rm.
11928 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11929 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11930 index in M. */
11931
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  /* Encode a scalar operand for a multiply as the 5-bit M:Rm value: for
     16-bit elements the register goes in Rm[2:0] with the index in
     M:Rm[3]; for 32-bit elements the register is Rm[3:0] and the index
     is M.  Reports an error and returns 0 if out of range.  */
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
11957
11958 /* Encode multiply / multiply-accumulate scalar instructions. */
11959
11960 static void
11961 neon_mul_mac (struct neon_type_el et, int ubit)
11962 {
11963 unsigned scalar;
11964
11965 /* Give a more helpful error message if we have an invalid type. */
11966 if (et.type == NT_invtype)
11967 return;
11968
11969 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
11970 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11971 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11972 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11973 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11974 inst.instruction |= LOW4 (scalar);
11975 inst.instruction |= HI1 (scalar) << 5;
11976 inst.instruction |= (et.type == NT_float) << 8;
11977 inst.instruction |= neon_logbits (et.size) << 20;
11978 inst.instruction |= (ubit != 0) << 24;
11979
11980 inst.instruction = neon_dp_fixup (inst.instruction);
11981 }
11982
11983 static void
11984 do_neon_mac_maybe_scalar (void)
11985 {
11986 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
11987 return;
11988
11989 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11990 return;
11991
11992 if (inst.operands[2].isscalar)
11993 {
11994 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11995 struct neon_type_el et = neon_check_type (3, rs,
11996 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
11997 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11998 neon_mul_mac (et, neon_quad (rs));
11999 }
12000 else
12001 {
12002 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12003 affected if we specify unsigned args. */
12004 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12005 }
12006 }
12007
12008 static void
12009 do_neon_tst (void)
12010 {
12011 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12012 struct neon_type_el et = neon_check_type (3, rs,
12013 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12014 neon_three_same (neon_quad (rs), 0, et.size);
12015 }
12016
12017 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12018 same types as the MAC equivalents. The polynomial type for this instruction
12019 is encoded the same as the integer type. */
12020
12021 static void
12022 do_neon_mul (void)
12023 {
12024 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12025 return;
12026
12027 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12028 return;
12029
12030 if (inst.operands[2].isscalar)
12031 do_neon_mac_maybe_scalar ();
12032 else
12033 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12034 }
12035
12036 static void
12037 do_neon_qdmulh (void)
12038 {
12039 if (inst.operands[2].isscalar)
12040 {
12041 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12042 struct neon_type_el et = neon_check_type (3, rs,
12043 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12044 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12045 neon_mul_mac (et, neon_quad (rs));
12046 }
12047 else
12048 {
12049 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12050 struct neon_type_el et = neon_check_type (3, rs,
12051 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12052 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12053 /* The U bit (rounding) comes from bit mask. */
12054 neon_three_same (neon_quad (rs), 0, et.size);
12055 }
12056 }
12057
12058 static void
12059 do_neon_fcmp_absolute (void)
12060 {
12061 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12062 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12063 /* Size field comes from bit mask. */
12064 neon_three_same (neon_quad (rs), 1, -1);
12065 }
12066
/* Inverted absolute float comparison: swap the source operands, then
   encode as the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
12073
12074 static void
12075 do_neon_step (void)
12076 {
12077 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12078 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12079 neon_three_same (neon_quad (rs), 0, -1);
12080 }
12081
12082 static void
12083 do_neon_abs_neg (void)
12084 {
12085 enum neon_shape rs;
12086 struct neon_type_el et;
12087
12088 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12089 return;
12090
12091 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12092 return;
12093
12094 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12095 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12096
12097 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12098 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12099 inst.instruction |= LOW4 (inst.operands[1].reg);
12100 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12101 inst.instruction |= neon_quad (rs) << 6;
12102 inst.instruction |= (et.type == NT_float) << 10;
12103 inst.instruction |= neon_logbits (et.size) << 18;
12104
12105 inst.instruction = neon_dp_fixup (inst.instruction);
12106 }
12107
12108 static void
12109 do_neon_sli (void)
12110 {
12111 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12112 struct neon_type_el et = neon_check_type (2, rs,
12113 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12114 int imm = inst.operands[2].imm;
12115 constraint (imm < 0 || (unsigned)imm >= et.size,
12116 _("immediate out of range for insert"));
12117 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12118 }
12119
12120 static void
12121 do_neon_sri (void)
12122 {
12123 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12124 struct neon_type_el et = neon_check_type (2, rs,
12125 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12126 int imm = inst.operands[2].imm;
12127 constraint (imm < 1 || (unsigned)imm > et.size,
12128 _("immediate out of range for insert"));
12129 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12130 }
12131
12132 static void
12133 do_neon_qshlu_imm (void)
12134 {
12135 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12136 struct neon_type_el et = neon_check_type (2, rs,
12137 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12138 int imm = inst.operands[2].imm;
12139 constraint (imm < 0 || (unsigned)imm >= et.size,
12140 _("immediate out of range for shift"));
12141 /* Only encodes the 'U present' variant of the instruction.
12142 In this case, signed types have OP (bit 8) set to 0.
12143 Unsigned types have OP set to 1. */
12144 inst.instruction |= (et.type == NT_unsigned) << 8;
12145 /* The rest of the bits are the same as other immediate shifts. */
12146 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12147 }
12148
12149 static void
12150 do_neon_qmovn (void)
12151 {
12152 struct neon_type_el et = neon_check_type (2, NS_DQ,
12153 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12154 /* Saturating move where operands can be signed or unsigned, and the
12155 destination has the same signedness. */
12156 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12157 if (et.type == NT_unsigned)
12158 inst.instruction |= 0xc0;
12159 else
12160 inst.instruction |= 0x80;
12161 neon_two_same (0, 1, et.size / 2);
12162 }
12163
12164 static void
12165 do_neon_qmovun (void)
12166 {
12167 struct neon_type_el et = neon_check_type (2, NS_DQ,
12168 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12169 /* Saturating move with unsigned results. Operands must be signed. */
12170 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12171 neon_two_same (0, 1, et.size / 2);
12172 }
12173
12174 static void
12175 do_neon_rshift_sat_narrow (void)
12176 {
12177 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12178 or unsigned. If operands are unsigned, results must also be unsigned. */
12179 struct neon_type_el et = neon_check_type (2, NS_DQI,
12180 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12181 int imm = inst.operands[2].imm;
12182 /* This gets the bounds check, size encoding and immediate bits calculation
12183 right. */
12184 et.size /= 2;
12185
12186 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12187 VQMOVN.I<size> <Dd>, <Qm>. */
12188 if (imm == 0)
12189 {
12190 inst.operands[2].present = 0;
12191 inst.instruction = N_MNEM_vqmovn;
12192 do_neon_qmovn ();
12193 return;
12194 }
12195
12196 constraint (imm < 1 || (unsigned)imm > et.size,
12197 _("immediate out of range"));
12198 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12199 }
12200
12201 static void
12202 do_neon_rshift_sat_narrow_u (void)
12203 {
12204 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12205 or unsigned. If operands are unsigned, results must also be unsigned. */
12206 struct neon_type_el et = neon_check_type (2, NS_DQI,
12207 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12208 int imm = inst.operands[2].imm;
12209 /* This gets the bounds check, size encoding and immediate bits calculation
12210 right. */
12211 et.size /= 2;
12212
12213 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12214 VQMOVUN.I<size> <Dd>, <Qm>. */
12215 if (imm == 0)
12216 {
12217 inst.operands[2].present = 0;
12218 inst.instruction = N_MNEM_vqmovun;
12219 do_neon_qmovun ();
12220 return;
12221 }
12222
12223 constraint (imm < 1 || (unsigned)imm > et.size,
12224 _("immediate out of range"));
12225 /* FIXME: The manual is kind of unclear about what value U should have in
12226 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12227 must be 1. */
12228 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12229 }
12230
12231 static void
12232 do_neon_movn (void)
12233 {
12234 struct neon_type_el et = neon_check_type (2, NS_DQ,
12235 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12236 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12237 neon_two_same (0, 1, et.size / 2);
12238 }
12239
12240 static void
12241 do_neon_rshift_narrow (void)
12242 {
12243 struct neon_type_el et = neon_check_type (2, NS_DQI,
12244 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12245 int imm = inst.operands[2].imm;
12246 /* This gets the bounds check, size encoding and immediate bits calculation
12247 right. */
12248 et.size /= 2;
12249
12250 /* If immediate is zero then we are a pseudo-instruction for
12251 VMOVN.I<size> <Dd>, <Qm> */
12252 if (imm == 0)
12253 {
12254 inst.operands[2].present = 0;
12255 inst.instruction = N_MNEM_vmovn;
12256 do_neon_movn ();
12257 return;
12258 }
12259
12260 constraint (imm < 1 || (unsigned)imm > et.size,
12261 _("immediate out of range for narrowing operation"));
12262 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12263 }
12264
12265 static void
12266 do_neon_shll (void)
12267 {
12268 /* FIXME: Type checking when lengthening. */
12269 struct neon_type_el et = neon_check_type (2, NS_QDI,
12270 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12271 unsigned imm = inst.operands[2].imm;
12272
12273 if (imm == et.size)
12274 {
12275 /* Maximum shift variant. */
12276 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12277 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12278 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12279 inst.instruction |= LOW4 (inst.operands[1].reg);
12280 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12281 inst.instruction |= neon_logbits (et.size) << 18;
12282
12283 inst.instruction = neon_dp_fixup (inst.instruction);
12284 }
12285 else
12286 {
12287 /* A more-specific type check for non-max versions. */
12288 et = neon_check_type (2, NS_QDI,
12289 N_EQK | N_DBL, N_SU_32 | N_KEY);
12290 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12291 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12292 }
12293 }
12294
12295 /* Check the various types for the VCVT instruction, and return which version
12296 the current instruction is. */
12297
/* Try each conversion "flavour" in turn; the first whose operand types
   match returns its index (0-17), clearing any error left behind by the
   earlier failed attempts.  Returns -1 if no flavour matches.  */

static int
neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  /* From here on all variants are VFP, whatever the shape.  */
  whole_reg = N_VFP;

  /* VFP instructions. */
  CVT_VAR (4, N_F32, N_F64);
  CVT_VAR (5, N_F64, N_F32);
  CVT_VAR (6, N_S32, N_F64 | key);
  CVT_VAR (7, N_U32, N_F64 | key);
  CVT_VAR (8, N_F64 | key, N_S32);
  CVT_VAR (9, N_F64 | key, N_U32);
  /* VFP instructions with bitshift. */
  CVT_VAR (10, N_F32 | key, N_S16);
  CVT_VAR (11, N_F32 | key, N_U16);
  CVT_VAR (12, N_F64 | key, N_S16);
  CVT_VAR (13, N_F64 | key, N_U16);
  CVT_VAR (14, N_S16, N_F32 | key);
  CVT_VAR (15, N_U16, N_F32 | key);
  CVT_VAR (16, N_S16, N_F64 | key);
  CVT_VAR (17, N_U16, N_F64 | key);

  return -1;
#undef CVT_VAR
}
12344
12345 /* Neon-syntax VFP conversions. */
12346
12347 static void
12348 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12349 {
12350 const char *opname = 0;
12351
12352 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12353 {
12354 /* Conversions with immediate bitshift. */
12355 const char *enc[] =
12356 {
12357 "ftosls",
12358 "ftouls",
12359 "fsltos",
12360 "fultos",
12361 NULL,
12362 NULL,
12363 "ftosld",
12364 "ftould",
12365 "fsltod",
12366 "fultod",
12367 "fshtos",
12368 "fuhtos",
12369 "fshtod",
12370 "fuhtod",
12371 "ftoshs",
12372 "ftouhs",
12373 "ftoshd",
12374 "ftouhd"
12375 };
12376
12377 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12378 {
12379 opname = enc[flavour];
12380 constraint (inst.operands[0].reg != inst.operands[1].reg,
12381 _("operands 0 and 1 must be the same register"));
12382 inst.operands[1] = inst.operands[2];
12383 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12384 }
12385 }
12386 else
12387 {
12388 /* Conversions without bitshift. */
12389 const char *enc[] =
12390 {
12391 "ftosis",
12392 "ftouis",
12393 "fsitos",
12394 "fuitos",
12395 "fcvtsd",
12396 "fcvtds",
12397 "ftosid",
12398 "ftouid",
12399 "fsitod",
12400 "fuitod"
12401 };
12402
12403 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12404 opname = enc[flavour];
12405 }
12406
12407 if (opname)
12408 do_vfp_nsyn_opcode (opname);
12409 }
12410
12411 static void
12412 do_vfp_nsyn_cvtz (void)
12413 {
12414 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12415 int flavour = neon_cvt_flavour (rs);
12416 const char *enc[] =
12417 {
12418 "ftosizs",
12419 "ftouizs",
12420 NULL,
12421 NULL,
12422 NULL,
12423 NULL,
12424 "ftosizd",
12425 "ftouizd"
12426 };
12427
12428 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12429 do_vfp_nsyn_opcode (enc[flavour]);
12430 }
12431
12432 static void
12433 do_neon_cvt (void)
12434 {
12435 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12436 NS_FD, NS_DF, NS_FF, NS_NULL);
12437 int flavour = neon_cvt_flavour (rs);
12438
12439 /* VFP rather than Neon conversions. */
12440 if (flavour >= 4)
12441 {
12442 do_vfp_nsyn_cvt (rs, flavour);
12443 return;
12444 }
12445
12446 switch (rs)
12447 {
12448 case NS_DDI:
12449 case NS_QQI:
12450 {
12451 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12452 return;
12453
12454 /* Fixed-point conversion with #0 immediate is encoded as an
12455 integer conversion. */
12456 if (inst.operands[2].present && inst.operands[2].imm == 0)
12457 goto int_encode;
12458 unsigned immbits = 32 - inst.operands[2].imm;
12459 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12460 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12461 if (flavour != -1)
12462 inst.instruction |= enctab[flavour];
12463 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12464 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12465 inst.instruction |= LOW4 (inst.operands[1].reg);
12466 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12467 inst.instruction |= neon_quad (rs) << 6;
12468 inst.instruction |= 1 << 21;
12469 inst.instruction |= immbits << 16;
12470
12471 inst.instruction = neon_dp_fixup (inst.instruction);
12472 }
12473 break;
12474
12475 case NS_DD:
12476 case NS_QQ:
12477 int_encode:
12478 {
12479 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12480
12481 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12482
12483 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12484 return;
12485
12486 if (flavour != -1)
12487 inst.instruction |= enctab[flavour];
12488
12489 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12490 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12491 inst.instruction |= LOW4 (inst.operands[1].reg);
12492 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12493 inst.instruction |= neon_quad (rs) << 6;
12494 inst.instruction |= 2 << 18;
12495
12496 inst.instruction = neon_dp_fixup (inst.instruction);
12497 }
12498 break;
12499
12500 default:
12501 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12502 do_vfp_nsyn_cvt (rs, flavour);
12503 }
12504 }
12505
static void
neon_move_immediate (void)
{
  /* Encode the immediate forms of VMOV/VMVN.  If the immediate cannot be
     represented directly, try again with the bits inverted and the
     MOV/MVN sense flipped (the two encodings cover complementary
     immediate sets).  */
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split: low half in .imm, high half in .reg
     (regisimm flags that layout).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                            et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  /* Rewrite the OP bit with the (possibly flipped) MOV/MVN sense.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
12555
12556 static void
12557 do_neon_mvn (void)
12558 {
12559 if (inst.operands[1].isreg)
12560 {
12561 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12562
12563 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12564 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12565 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12566 inst.instruction |= LOW4 (inst.operands[1].reg);
12567 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12568 inst.instruction |= neon_quad (rs) << 6;
12569 }
12570 else
12571 {
12572 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12573 neon_move_immediate ();
12574 }
12575
12576 inst.instruction = neon_dp_fixup (inst.instruction);
12577 }
12578
12579 /* Encode instructions of form:
12580
12581 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12582 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12583
12584 */
12585
12586 static void
12587 neon_mixed_length (struct neon_type_el et, unsigned size)
12588 {
12589 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12590 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12591 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12592 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12593 inst.instruction |= LOW4 (inst.operands[2].reg);
12594 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12595 inst.instruction |= (et.type == NT_unsigned) << 24;
12596 inst.instruction |= neon_logbits (size) << 20;
12597
12598 inst.instruction = neon_dp_fixup (inst.instruction);
12599 }
12600
12601 static void
12602 do_neon_dyadic_long (void)
12603 {
12604 /* FIXME: Type checking for lengthening op. */
12605 struct neon_type_el et = neon_check_type (3, NS_QDD,
12606 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12607 neon_mixed_length (et, et.size);
12608 }
12609
12610 static void
12611 do_neon_abal (void)
12612 {
12613 struct neon_type_el et = neon_check_type (3, NS_QDD,
12614 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12615 neon_mixed_length (et, et.size);
12616 }
12617
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  /* Encode a long multiply(-accumulate) whose third operand may be either
     a scalar (Dm[x]) or a whole register.
     NOTE(review): as used here, REGTYPES constrains the scalar form and
     SCALARTYPES the register form — the parameter names look swapped
     relative to their use; confirm against callers before renaming.  */
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}
12636
12637 static void
12638 do_neon_mac_maybe_scalar_long (void)
12639 {
12640 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12641 }
12642
12643 static void
12644 do_neon_dyadic_wide (void)
12645 {
12646 struct neon_type_el et = neon_check_type (3, NS_QQD,
12647 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12648 neon_mixed_length (et, et.size);
12649 }
12650
12651 static void
12652 do_neon_dyadic_narrow (void)
12653 {
12654 struct neon_type_el et = neon_check_type (3, NS_QDD,
12655 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12656 /* Operand sign is unimportant, and the U bit is part of the opcode,
12657 so force the operand type to integer. */
12658 et.type = NT_integer;
12659 neon_mixed_length (et, et.size / 2);
12660 }
12661
12662 static void
12663 do_neon_mul_sat_scalar_long (void)
12664 {
12665 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12666 }
12667
12668 static void
12669 do_neon_vmull (void)
12670 {
12671 if (inst.operands[2].isscalar)
12672 do_neon_mac_maybe_scalar_long ();
12673 else
12674 {
12675 struct neon_type_el et = neon_check_type (3, NS_QDD,
12676 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12677 if (et.type == NT_poly)
12678 inst.instruction = NEON_ENC_POLY (inst.instruction);
12679 else
12680 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12681 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12682 zero. Should be OK as-is. */
12683 neon_mixed_length (et, et.size);
12684 }
12685 }
12686
static void
do_neon_ext (void)
{
  /* VEXT: extract a vector from the concatenation of two source vectors,
     starting at the element index given by the immediate operand.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* The hardware immediate is always in bytes; scale the element index by
     the element size.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12705
static void
do_neon_rev (void)
{
  /* VREV16/VREV32/VREV64: reverse elements within regions of a vector.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
12722
static void
do_neon_dup (void)
{
  /* VDUP: duplicate either a scalar (Dm[x]) or an ARM core register into
     every element of a vector.  The two sources use entirely different
     encodings.  */
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      /* imm4 field: a single bit set at position (size / 8) marks the
         element size; the scalar index is packed in the bits above it.  */
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector. */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      /* Element size selects the B/E bits of the encoding.  */
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field. */
      do_vfp_cond_or_thumb ();
    }
}
12773
12774 /* VMOV has particularly many variations. It can be one of:
12775 0. VMOV<c><q> <Qd>, <Qm>
12776 1. VMOV<c><q> <Dd>, <Dm>
12777 (Register operations, which are VORR with Rm = Rn.)
12778 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12779 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12780 (Immediate loads.)
12781 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12782 (ARM register to scalar.)
12783 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12784 (Two ARM registers to vector.)
12785 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12786 (Scalar to ARM register.)
12787 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12788 (Vector to two ARM registers.)
12789 8. VMOV.F32 <Sd>, <Sm>
12790 9. VMOV.F64 <Dd>, <Dm>
12791 (VFP register moves.)
12792 10. VMOV.F32 <Sd>, #imm
12793 11. VMOV.F64 <Dd>, #imm
12794 (VFP float immediate load.)
12795 12. VMOV <Rd>, <Sm>
12796 (VFP single to ARM reg.)
12797 13. VMOV <Sd>, <Rm>
12798 (ARM reg to VFP single.)
12799 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12800 (Two ARM regs to two VFP singles.)
12801 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12802 (Two VFP singles to two ARM regs.)
12803
12804 These cases can be disambiguated using neon_select_shape, except cases 1/9
12805 and 3/11 which depend on the operand type too.
12806
12807 All the encoded bits are hardcoded by this function.
12808
12809 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12810 Cases 5, 7 may be used with VFPv2 and above.
12811
12812 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12813 can specify a type where it doesn't make sense to, and is ignored).
12814 */
12815
static void
do_neon_mov (void)
{
  /* Encode one of the many VMOV variants enumerated in the comment above;
     the operand shape selected by neon_select_shape picks the case, and
     the float-typed DD/DI shapes additionally split on the element type
     (cases 1/9 and 3/11).  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given. */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through. */

    case NS_QQ:  /* case 0/1. */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that. */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        /* VORR with Rm == Rn: the source register is encoded twice.  */
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DI:  /* case 3/11. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd). */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through. */

    case NS_QI:  /* case 2/3. */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    case NS_SR:  /* case 4. */
      {
        unsigned bcdebits = 0;
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_8 | N_16 | N_32 | N_KEY, N_EQK);
        int logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        /* 8/16-bit transfers need Neon; plain VFP only does 32-bit.  */
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Size selects the opc1/opc2 bit pattern; the scalar index is
           folded into the low bits.  */
        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6. */
      {
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        unsigned logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Scalar-to-ARM transfers also encode signedness (the U bit is
           part of the opc pattern for 8/16-bit element sizes).  */
        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys). */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts). */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only immediates representable in the 8-bit VFP "quarter float"
         format can be encoded.  */
      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs). */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr). */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four. Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect. */
    case NS_RRFF:  /* case 14 (fmrrs). */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr). */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
13022
static void
do_neon_rshift_round_imm (void)
{
  /* V{R}SHR: right shift by immediate.  A zero shift count has no
     shift encoding and is assembled as the equivalent VMOV instead.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  /* Right shifts are encoded as (size - shift) in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                  et.size - imm);
}
13043
13044 static void
13045 do_neon_movl (void)
13046 {
13047 struct neon_type_el et = neon_check_type (2, NS_QD,
13048 N_EQK | N_DBL, N_SU_32 | N_KEY);
13049 unsigned sizebits = et.size >> 3;
13050 inst.instruction |= sizebits << 19;
13051 neon_two_same (0, et.type == NT_unsigned, -1);
13052 }
13053
13054 static void
13055 do_neon_trn (void)
13056 {
13057 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13058 struct neon_type_el et = neon_check_type (2, rs,
13059 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13060 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13061 neon_two_same (neon_quad (rs), 1, et.size);
13062 }
13063
static void
do_neon_zip_uzp (void)
{
  /* VZIP/VUZP: interleave / de-interleave two vectors.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
13079
13080 static void
13081 do_neon_sat_abs_neg (void)
13082 {
13083 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13084 struct neon_type_el et = neon_check_type (2, rs,
13085 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13086 neon_two_same (neon_quad (rs), 1, et.size);
13087 }
13088
13089 static void
13090 do_neon_pair_long (void)
13091 {
13092 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13093 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13094 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
13095 inst.instruction |= (et.type == NT_unsigned) << 7;
13096 neon_two_same (neon_quad (rs), 1, et.size);
13097 }
13098
13099 static void
13100 do_neon_recip_est (void)
13101 {
13102 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13103 struct neon_type_el et = neon_check_type (2, rs,
13104 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13105 inst.instruction |= (et.type == NT_float) << 8;
13106 neon_two_same (neon_quad (rs), 1, et.size);
13107 }
13108
13109 static void
13110 do_neon_cls (void)
13111 {
13112 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13113 struct neon_type_el et = neon_check_type (2, rs,
13114 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13115 neon_two_same (neon_quad (rs), 1, et.size);
13116 }
13117
13118 static void
13119 do_neon_clz (void)
13120 {
13121 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13122 struct neon_type_el et = neon_check_type (2, rs,
13123 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13124 neon_two_same (neon_quad (rs), 1, et.size);
13125 }
13126
13127 static void
13128 do_neon_cnt (void)
13129 {
13130 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13131 struct neon_type_el et = neon_check_type (2, rs,
13132 N_EQK | N_INT, N_8 | N_KEY);
13133 neon_two_same (neon_quad (rs), 1, et.size);
13134 }
13135
13136 static void
13137 do_neon_swp (void)
13138 {
13139 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13140 neon_two_same (neon_quad (rs), 1, -1);
13141 }
13142
static void
do_neon_tbl_tbx (void)
{
  /* VTBL/VTBX: table lookup from a list of 1-4 D registers.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded minus one in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
13166
static void
do_neon_ldm_stm (void)
{
  /* VLDM/VSTM for double registers; single-register lists are handed off
     to the VFP encoder.  */
  /* P, U and L bits are part of bitmask. */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each double register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
13196
13197 static void
13198 do_neon_ldr_str (void)
13199 {
13200 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13201
13202 if (inst.operands[0].issingle)
13203 {
13204 if (is_ldr)
13205 do_vfp_nsyn_opcode ("flds");
13206 else
13207 do_vfp_nsyn_opcode ("fsts");
13208 }
13209 else
13210 {
13211 if (is_ldr)
13212 do_vfp_nsyn_opcode ("fldd");
13213 else
13214 do_vfp_nsyn_opcode ("fstd");
13215 }
13216 }
13217
13218 /* "interleave" version also handles non-interleaving register VLD1/VST1
13219 instructions. */
13220
static void
do_neon_ld_st_interleave (void)
{
  /* Encode interleaving VLD<n>/VST<n>; also handles the plain register
     forms of VLD1/VST1.  Alignment and the list-type field are derived
     from the parsed register list and alignment specifier.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries. */
  const int typetable[] =
  {
    0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
    -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2. */
    -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3. */
    -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4. */
  };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment is parsed as (bits << 8); 128/256-bit alignment is not
     available for 3-register lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back. */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
13283
13284 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13285 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13286 otherwise. The variable arguments are a list of pairs of legal (size, align)
13287 values, terminated with -1. */
13288
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  /* Check that (SIZE, ALIGN) appears in the -1-terminated varargs list of
     legal (size, align) pairs.  *DO_ALIGN is set to 1 if the relevant
     alignment bit should be set, 0 otherwise.  Returns SUCCESS or FAIL;
     on FAIL an error is recorded via first_error.  */
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specifier parsed: nothing to check or encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      /* -1 terminates the pair list.  */
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
13324
/* Encode the single n-element structure to/from one lane forms of
   VLD<n>/VST<n>.  The <n> minus one is taken from bits [9:8] of the
   initial opcode bitmask; legal (size, alignment) pairs differ per <n>.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;	/* Alignment in bits, 0 if none.  */
  int n = (inst.instruction >> 8) & 3;		/* <n> minus one.  */
  int max_el = 64 / et.size;			/* Elements per D register.  */

  if (et.type == NT_invtype)
    return;

  /* The register list must name exactly <n> registers, the lane index must
     fit in a D register, and stride 2 cannot be encoded for 8-bit elements.  */
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  /* Alignment field value depends on element size; 8-bit elements
	     take no alignment (left as 0).  */
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      /* VLD2/VST2 use a single alignment bit.  */
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  /* 32-bit elements accept two alignments (64 or 128 bits), encoded
	     differently; 8- and 16-bit elements each have one encoding.  */
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* The lane index occupies the bits above the stride/alignment bits.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
13409
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      /* Register stride is meaningless for a single-register transfer;
	 the parser should never have produced one.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* One or two registers may be listed; two sets the T bit (bit 5).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of 2.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* VLD3 to all lanes accepts no alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the otherwise-unused
	   size encoding 0x3 in bits [7:6].  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment flag itself goes in bit 4.  */
  inst.instruction |= do_align << 4;
}
13483
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  /* Pick the encoding family from the lane specifier on operand 0.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      /* No lane given: multiple n-element structures form.  */
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      /* All-lanes form: single structure loaded to every lane.  */
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      /* A specific lane index: single structure to/from one lane.  */
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      /* Post-indexed: the Rm field holds the increment register, which may
	 not be 0xd (SP) or 0xf (PC) since those values encode the
	 writeback/no-writeback forms below.  */
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      /* Rm == 0xd: writeback, no register offset.  */
      inst.instruction |= 0xd;
    }
  else
    /* Rm == 0xf: no writeback.  */
    inst.instruction |= 0xf;

  /* Top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
13533
13534 \f
13535 /* Overall per-instruction processing. */
13536
13537 /* We need to be able to fix up arbitrary expressions in some statements.
13538 This is so that we can handle symbols that are an arbitrary distance from
13539 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13540 which returns part of an address in a form which will be valid for
13541 a data instruction. We do this by pushing the expression into a symbol
13542 in the expr_section, and creating a fix for that. */
13543
13544 static void
13545 fix_new_arm (fragS * frag,
13546 int where,
13547 short int size,
13548 expressionS * exp,
13549 int pc_rel,
13550 int reloc)
13551 {
13552 fixS * new_fix;
13553
13554 switch (exp->X_op)
13555 {
13556 case O_constant:
13557 case O_symbol:
13558 case O_add:
13559 case O_subtract:
13560 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13561 break;
13562
13563 default:
13564 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13565 pc_rel, reloc);
13566 break;
13567 }
13568
13569 /* Mark whether the fix is to a THUMB instruction, or an ARM
13570 instruction. */
13571 new_fix->tc_fix_data = thumb_mode;
13572 }
13573
13574 /* Create a frg for an instruction requiring relaxation. */
13575 static void
13576 output_relax_insn (void)
13577 {
13578 char * to;
13579 symbolS *sym;
13580 int offset;
13581
13582 /* The size of the instruction is unknown, so tie the debug info to the
13583 start of the instruction. */
13584 dwarf2_emit_insn (0);
13585
13586 switch (inst.reloc.exp.X_op)
13587 {
13588 case O_symbol:
13589 sym = inst.reloc.exp.X_add_symbol;
13590 offset = inst.reloc.exp.X_add_number;
13591 break;
13592 case O_constant:
13593 sym = NULL;
13594 offset = inst.reloc.exp.X_add_number;
13595 break;
13596 default:
13597 sym = make_expr_symbol (&inst.reloc.exp);
13598 offset = 0;
13599 break;
13600 }
13601 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13602 inst.relax, sym, offset, NULL/*offset, opcode*/);
13603 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13604 }
13605
13606 /* Write a 32-bit thumb instruction to buf. */
13607 static void
13608 put_thumb32_insn (char * buf, unsigned long insn)
13609 {
13610 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13611 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13612 }
13613
13614 static void
13615 output_inst (const char * str)
13616 {
13617 char * to = NULL;
13618
13619 if (inst.error)
13620 {
13621 as_bad ("%s -- `%s'", inst.error, str);
13622 return;
13623 }
13624 if (inst.relax) {
13625 output_relax_insn();
13626 return;
13627 }
13628 if (inst.size == 0)
13629 return;
13630
13631 to = frag_more (inst.size);
13632
13633 if (thumb_mode && (inst.size > THUMB_SIZE))
13634 {
13635 assert (inst.size == (2 * THUMB_SIZE));
13636 put_thumb32_insn (to, inst.instruction);
13637 }
13638 else if (inst.size > INSN_SIZE)
13639 {
13640 assert (inst.size == (2 * INSN_SIZE));
13641 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13642 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13643 }
13644 else
13645 md_number_to_chars (to, inst.instruction, inst.size);
13646
13647 if (inst.reloc.type != BFD_RELOC_UNUSED)
13648 fix_new_arm (frag_now, to - frag_now->fr_literal,
13649 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13650 inst.reloc.type);
13651
13652 dwarf2_emit_insn (inst.size);
13653 }
13654
/* Tag values used in struct asm_opcode's tag field.  These describe how a
   mnemonic interacts with condition codes: whether it may carry one at all,
   and whether the conditional affix is a suffix or an infix (and where the
   infix goes).  opcode_lookup uses them to decode user-written mnemonics.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
13688
13689 /* Subroutine of md_assemble, responsible for looking up the primary
13690 opcode from the mnemonic the user wrote. STR points to the
13691 beginning of the mnemonic.
13692
13693 This is not simply a hash table lookup, because of conditional
13694 variants. Most instructions have conditional variants, which are
13695 expressed with a _conditional affix_ to the mnemonic. If we were
13696 to encode each conditional variant as a literal string in the opcode
13697 table, it would have approximately 20,000 entries.
13698
13699 Most mnemonics take this affix as a suffix, and in unified syntax,
13700 'most' is upgraded to 'all'. However, in the divided syntax, some
13701 instructions take the affix as an infix, notably the s-variants of
13702 the arithmetic instructions. Of those instructions, all but six
13703 have the infix appear after the third character of the mnemonic.
13704
13705 Accordingly, the algorithm for looking up primary opcodes given
13706 an identifier is:
13707
13708 1. Look up the identifier in the opcode table.
13709 If we find a match, go to step U.
13710
13711 2. Look up the last two characters of the identifier in the
13712 conditions table. If we find a match, look up the first N-2
13713 characters of the identifier in the opcode table. If we
13714 find a match, go to step CE.
13715
13716 3. Look up the fourth and fifth characters of the identifier in
13717 the conditions table. If we find a match, extract those
13718 characters from the identifier, and look up the remaining
13719 characters in the opcode table. If we find a match, go
13720 to step CM.
13721
13722 4. Fail.
13723
13724 U. Examine the tag field of the opcode structure, in case this is
13725 one of the six instructions with its conditional infix in an
13726 unusual place. If it is, the tag tells us where to find the
13727 infix; look it up in the conditions table and set inst.cond
13728 accordingly. Otherwise, this is an unconditional instruction.
13729 Again set inst.cond accordingly. Return the opcode structure.
13730
13731 CE. Examine the tag field to make sure this is an instruction that
13732 should receive a conditional suffix. If it is not, fail.
13733 Otherwise, set inst.cond from the suffix we already looked up,
13734 and return the opcode structure.
13735
13736 CM. Examine the tag field to make sure this is an instruction that
13737 should receive a conditional infix after the third character.
13738 If it is not, fail. Otherwise, undo the edits to the current
13739 line of input and proceed as for case CE. */
13740
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;	/* No width suffix consumed; '.' may start a type.  */

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return 0;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* Tags >= OT_odd_infix_0 encode the infix position directly.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* A conditional suffix is two characters, so a mnemonic shorter than
     three characters cannot consist of a base mnemonic plus a suffix.  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    {
	      inst.cond = cond->value;
	    }
	  else
	    {
	      /* delayed diagnostic */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return 0;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  /* Temporarily splice the candidate infix out of the input line, look up
     the remaining characters, then restore the line byte-for-byte.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
13899
13900 void
13901 md_assemble (char *str)
13902 {
13903 char *p = str;
13904 const struct asm_opcode * opcode;
13905
13906 /* Align the previous label if needed. */
13907 if (last_label_seen != NULL)
13908 {
13909 symbol_set_frag (last_label_seen, frag_now);
13910 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
13911 S_SET_SEGMENT (last_label_seen, now_seg);
13912 }
13913
13914 memset (&inst, '\0', sizeof (inst));
13915 inst.reloc.type = BFD_RELOC_UNUSED;
13916
13917 opcode = opcode_lookup (&p);
13918 if (!opcode)
13919 {
13920 /* It wasn't an instruction, but it might be a register alias of
13921 the form alias .req reg, or a Neon .dn/.qn directive. */
13922 if (!create_register_alias (str, p)
13923 && !create_neon_reg_alias (str, p))
13924 as_bad (_("bad instruction `%s'"), str);
13925
13926 return;
13927 }
13928
13929 if (opcode->tag == OT_cinfix3_deprecated)
13930 as_warn (_("s suffix on comparison instruction is deprecated"));
13931
13932 /* The value which unconditional instructions should have in place of the
13933 condition field. */
13934 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
13935
13936 if (thumb_mode)
13937 {
13938 arm_feature_set variant;
13939
13940 variant = cpu_variant;
13941 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13942 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
13943 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
13944 /* Check that this instruction is supported for this CPU. */
13945 if (!opcode->tvariant
13946 || (thumb_mode == 1
13947 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
13948 {
13949 as_bad (_("selected processor does not support `%s'"), str);
13950 return;
13951 }
13952 if (inst.cond != COND_ALWAYS && !unified_syntax
13953 && opcode->tencode != do_t_branch)
13954 {
13955 as_bad (_("Thumb does not support conditional execution"));
13956 return;
13957 }
13958
13959 /* Check conditional suffixes. */
13960 if (current_it_mask)
13961 {
13962 int cond;
13963 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
13964 current_it_mask <<= 1;
13965 current_it_mask &= 0x1f;
13966 /* The BKPT instruction is unconditional even in an IT block. */
13967 if (!inst.error
13968 && cond != inst.cond && opcode->tencode != do_t_bkpt)
13969 {
13970 as_bad (_("incorrect condition in IT block"));
13971 return;
13972 }
13973 }
13974 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
13975 {
13976 as_bad (_("thumb conditional instrunction not in IT block"));
13977 return;
13978 }
13979
13980 mapping_state (MAP_THUMB);
13981 inst.instruction = opcode->tvalue;
13982
13983 if (!parse_operands (p, opcode->operands))
13984 opcode->tencode ();
13985
13986 /* Clear current_it_mask at the end of an IT block. */
13987 if (current_it_mask == 0x10)
13988 current_it_mask = 0;
13989
13990 if (!(inst.error || inst.relax))
13991 {
13992 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
13993 inst.size = (inst.instruction > 0xffff ? 4 : 2);
13994 if (inst.size_req && inst.size_req != inst.size)
13995 {
13996 as_bad (_("cannot honor width suffix -- `%s'"), str);
13997 return;
13998 }
13999 }
14000 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14001 *opcode->tvariant);
14002 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14003 set those bits when Thumb-2 32-bit instructions are seen. ie.
14004 anything other than bl/blx.
14005 This is overly pessimistic for relaxable instructions. */
14006 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14007 || inst.relax)
14008 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14009 arm_ext_v6t2);
14010 }
14011 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14012 {
14013 /* Check that this instruction is supported for this CPU. */
14014 if (!opcode->avariant ||
14015 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14016 {
14017 as_bad (_("selected processor does not support `%s'"), str);
14018 return;
14019 }
14020 if (inst.size_req)
14021 {
14022 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14023 return;
14024 }
14025
14026 mapping_state (MAP_ARM);
14027 inst.instruction = opcode->avalue;
14028 if (opcode->tag == OT_unconditionalF)
14029 inst.instruction |= 0xF << 28;
14030 else
14031 inst.instruction |= inst.cond << 28;
14032 inst.size = INSN_SIZE;
14033 if (!parse_operands (p, opcode->operands))
14034 opcode->aencode ();
14035 /* Arm mode bx is marked as both v4T and v5 because it's still required
14036 on a hypothetical non-thumb v5 core. */
14037 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14038 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14039 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14040 else
14041 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14042 *opcode->avariant);
14043 }
14044 else
14045 {
14046 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14047 "-- `%s'"), str);
14048 return;
14049 }
14050 output_inst (str);
14051 }
14052
14053 /* Various frobbings of labels and their addresses. */
14054
/* Called by gas at the start of each new input line: forget the label
   remembered from the previous line, so md_assemble only realigns labels
   that appear on the current statement.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
14060
/* Called by gas whenever a label SYM is defined: record it for later
   realignment, tag it with the current ARM/Thumb state, and mark it as a
   Thumb function entry point when .thumb_func is pending.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Record whether the label was defined in ARM or Thumb code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labeled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* .thumb_func applies to one label only.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
14117
14118 int
14119 arm_data_in_code (void)
14120 {
14121 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14122 {
14123 *input_line_pointer = '/';
14124 input_line_pointer += 5;
14125 *input_line_pointer = 0;
14126 return 1;
14127 }
14128
14129 return 0;
14130 }
14131
14132 char *
14133 arm_canonicalize_symbol_name (char * name)
14134 {
14135 int len;
14136
14137 if (thumb_mode && (len = strlen (name)) > 5
14138 && streq (name + len - 5, "/data"))
14139 *(name + len - 5) = 0;
14140
14141 return name;
14142 }
14143 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF expands to a reg_entry initializer: name string, number,
   REG_TYPE_* tag, builtin flag, and a zero Neon-type field.  REGNUM pastes
   prefix and number into the name; REGNUM2 doubles the encoded number
   (used for Q registers, which each occupy two D registers).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only REGDEF/REGNUM/REGSET are undefined here; REGNUM2,
   REGSETH and REGSET2 remain visible below this point — confirm that is
   intentional.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
14253
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.

   Each entry maps an MSR field-mask suffix (the part after the '_' in
   e.g. "msr cpsr_fsxc") to the PSR_* bit mask it selects.  Every
   permutation of the four field letters f/s/x/c is listed explicitly
   (12 two-letter, 24 three-letter and 24 four-letter combinations) so
   that the fields may be written in any order; lookup is a plain
   string match against this table.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
14331
/* Table of V7M psr names.  Maps each ARMv7-M special-register name to
   the numeric value encoded in the MRS/MSR instruction.  NOTE(review):
   the values appear to be the v7-M SYSm register-number encodings --
   confirm against the v7-M architecture manual before relying on the
   gaps (e.g. 4 is unassigned between "psr" and "ipsr").  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 },
  {"iapsr",	  1 },
  {"eapsr",	  2 },
  {"psr",	  3 },
  {"ipsr",	  5 },
  {"epsr",	  6 },
  {"iepsr",	  7 },
  {"msp",	  8 },
  {"psp",	  9 },
  {"primask",	  16},
  {"basepri",	  17},
  {"basepri_max", 18},
  {"faultmask",	  19},
  {"control",	  20}
};
14350
/* Table of all shift-in-operand names.  Both lower- and upper-case
   spellings are listed because lookup is by exact string match.
   "asl" is accepted as a synonym for "lsl"; "rrx" takes no shift
   amount.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
14361
/* Table of all explicit relocation names, i.e. the operand prefixes
   written as "ldr r0, =sym(GOT)" or ".word sym(tlsgd)".  Each name is
   listed in both lower and upper case since matching is by exact
   string; ELF-only because these map to ELF relocation types.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
14379
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  The value is the 4-bit condition field of the instruction
   encoding; "hs" is an alias for "cs" (0x2), and "ul"/"lo" are aliases
   for "cc" (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
14399
/* Table of barrier option names and their encodings.  NOTE(review):
   the values appear to be the 4-bit option field of the v6K/v7 memory
   barrier instructions (e.g. DSB SY) -- confirm against the
   architecture manual before extending this table.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
14407
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   OPSn expands to a brace-enclosed initializer listing n operand-type
   enumerators (OP_##a, ...); it fills the fixed-size operand array of
   a mnemonic-table entry.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14420
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.

   Common parameter key:
     mnem  - the mnemonic as a bare token (stringized with #mnem);
     op    - ARM opcode in hex, written without the 0x (pasted as 0x##op);
     top   - Thumb opcode: raw (TxCE), hex without 0x (TCE/TC3), or a
	     T_MNEM_xyz enumerator (tCE/tC3);
     nops  - operand count, selecting the matching OPSn macro above;
     ops   - parenthesized operand-type list passed to OPSn;
     ae/te - suffixes of the ARM/Thumb encoder functions (do_##ae,
	     do_##te).
   ARM_VARIANT / THUMB_VARIANT are #defined (and re-#defined) in the
   table itself to name the minimum architecture for each group.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix-at-3 interpretation is deprecated (a plain
   conditional suffix is preferred).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14452
/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.

   TxCM_ builds a single entry whose mnemonic is the concatenation
   m1 ## cond ## m3.  sizeof (#m2) == 1 detects an empty infix (the
   string is just the NUL terminator) and tags the entry
   OT_odd_infix_unc; otherwise the tag records the infix position as
   OT_odd_infix_0 + strlen (#m1).  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)	\
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand to one table entry per condition code (plus the hs/ul/lo
   aliases and the bare uninfixed form) -- mirrors the conds[] table
   above.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te)	\
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te)		\
  TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te)		\
  TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14484
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
14497
/* ARM-only variants of all the above.  The Thumb opcode slot is 0 and
   the Thumb variant/encoder slots are 0/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb-2 opcode is the ARM opcode with condition 0xe in the top
   nibble (0xe##op), and both variants share one encoder.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14527
/* ARM-only odd-infix entry: mnemonic is m1 ## cond ## m3; the
   sizeof (#m2) == 1 test detects an empty infix (see TxCM_ above).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only expansion of every condition-code variant (plus aliases),
   mirroring TxCM above.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional (condition field 0xE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only unconditional with 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14559
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  The opcode slot holds an
   N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Dummy encoder name: an entry with ae/te of "0" expands do_##ae to
   do_0, i.e. to this 0, leaving a null encoder pointer.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE(mnem,  0, op, nops, ops, 0, te)
14600
14601 static const struct asm_opcode insns[] =
14602 {
14603 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14604 #define THUMB_VARIANT &arm_ext_v4t
14605 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14606 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14607 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14608 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14609 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14610 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14611 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14612 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14613 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14614 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14615 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14616 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14617 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14618 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14619 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14620 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14621
14622 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14623 for setting PSR flag bits. They are obsolete in V6 and do not
14624 have Thumb equivalents. */
14625 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14626 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14627 CL(tstp, 110f000, 2, (RR, SH), cmp),
14628 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14629 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14630 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14631 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14632 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14633 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14634
14635 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14636 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14637 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14638 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14639
14640 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14641 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14642 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14643 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14644
14645 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14646 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14647 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14648 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14649 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14650 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14651
14652 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14653 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14654 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14655 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14656
14657 /* Pseudo ops. */
14658 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14659 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14660 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14661
14662 /* Thumb-compatibility pseudo ops. */
14663 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14664 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14665 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14666 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14667 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14668 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14669 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14670 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14671 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14672 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14673 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14674 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14675
14676 #undef THUMB_VARIANT
14677 #define THUMB_VARIANT &arm_ext_v6
14678 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14679
14680 /* V1 instructions with no Thumb analogue prior to V6T2. */
14681 #undef THUMB_VARIANT
14682 #define THUMB_VARIANT &arm_ext_v6t2
14683 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14684 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14685 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14686 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14687 CL(teqp, 130f000, 2, (RR, SH), cmp),
14688
14689 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14690 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14691 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14692 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14693
14694 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14695 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14696
14697 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14698 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14699
14700 /* V1 instructions with no Thumb analogue at all. */
14701 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14702 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14703
14704 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14705 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14706 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14707 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14708 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14709 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14710 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14711 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14712
14713 #undef ARM_VARIANT
14714 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14715 #undef THUMB_VARIANT
14716 #define THUMB_VARIANT &arm_ext_v4t
14717 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14718 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14719
14720 #undef THUMB_VARIANT
14721 #define THUMB_VARIANT &arm_ext_v6t2
14722 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14723 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14724
14725 /* Generic coprocessor instructions. */
14726 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14727 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14728 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14729 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14730 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14731 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14732 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14733
14734 #undef ARM_VARIANT
14735 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14736 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14737 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14738
14739 #undef ARM_VARIANT
14740 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14741 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14742 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14743
14744 #undef ARM_VARIANT
14745 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14746 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14747 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14748 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14749 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14750 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14751 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14752 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14753 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14754
14755 #undef ARM_VARIANT
14756 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14757 #undef THUMB_VARIANT
14758 #define THUMB_VARIANT &arm_ext_v4t
14759 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14760 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14761 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14762 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14763 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14764 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14765
14766 #undef ARM_VARIANT
14767 #define ARM_VARIANT &arm_ext_v4t_5
14768 /* ARM Architecture 4T. */
14769 /* Note: bx (and blx) are required on V5, even if the processor does
14770 not support Thumb. */
14771 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14772
14773 #undef ARM_VARIANT
14774 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14775 #undef THUMB_VARIANT
14776 #define THUMB_VARIANT &arm_ext_v5t
14777 /* Note: blx has 2 variants; the .value coded here is for
14778 BLX(2). Only this variant has conditional execution. */
14779 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14780 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14781
14782 #undef THUMB_VARIANT
14783 #define THUMB_VARIANT &arm_ext_v6t2
14784 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14785 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14786 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14787 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14788 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14789 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14790 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14791 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14792
14793 #undef ARM_VARIANT
14794 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14795 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14796 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14797 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14798 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14799
14800 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14801 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14802
14803 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14804 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14805 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14806 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14807
14808 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14809 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14810 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14811 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14812
14813 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14814 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14815
14816 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14817 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14818 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14819 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14820
14821 #undef ARM_VARIANT
14822 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14823 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14824 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14825 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14826
14827 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14828 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14829
14830 #undef ARM_VARIANT
14831 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14832 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14833
14834 #undef ARM_VARIANT
14835 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14836 #undef THUMB_VARIANT
14837 #define THUMB_VARIANT &arm_ext_v6
14838 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14839 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14840 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14841 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14842 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14843 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14844 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14845 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14846 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14847 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14848
14849 #undef THUMB_VARIANT
14850 #define THUMB_VARIANT &arm_ext_v6t2
14851 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14852 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14853 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14854
14855 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14856 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14857
14858 /* ARM V6 not included in V7M (eg. integer SIMD). */
14859 #undef THUMB_VARIANT
14860 #define THUMB_VARIANT &arm_ext_v6_notm
14861 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14862 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14863 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14864 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14865 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14866 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14867 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14868 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14869 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14870 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14871 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14872 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14873 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14874 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14875 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14876 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14877 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14878 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14879 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14880 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14881 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14882 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14883 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14884 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14885 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14886 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14887 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14888 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14889 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14890 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14891 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14892 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14893 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14894 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14895 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14896 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14897 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14898 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14899 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14900 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14901 UF(rfeib, 9900a00, 1, (RRw), rfe),
14902 UF(rfeda, 8100a00, 1, (RRw), rfe),
14903 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14904 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14905 UF(rfefa, 9900a00, 1, (RRw), rfe),
14906 UF(rfeea, 8100a00, 1, (RRw), rfe),
14907 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14908 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14909 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14910 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14911 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14912 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14913 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14914 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14915 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14916 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14917 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14918 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14919 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14920 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14921 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14922 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14923 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14924 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14925 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14926 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14927 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14928 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14929 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14930 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14931 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14932 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14933 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14934 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14935 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
14936 UF(srsib, 9cd0500, 1, (I31w), srs),
14937 UF(srsda, 84d0500, 1, (I31w), srs),
14938 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
14939 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
14940 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
14941 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
14942 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14943 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14944 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
14945
14946 #undef ARM_VARIANT
14947 #define ARM_VARIANT &arm_ext_v6k
14948 #undef THUMB_VARIANT
14949 #define THUMB_VARIANT &arm_ext_v6k
14950 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
14951 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
14952 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
14953 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
14954
14955 #undef THUMB_VARIANT
14956 #define THUMB_VARIANT &arm_ext_v6_notm
14957 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
14958 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
14959
14960 #undef THUMB_VARIANT
14961 #define THUMB_VARIANT &arm_ext_v6t2
14962 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14963 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14964 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14965 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14966 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
14967
14968 #undef ARM_VARIANT
14969 #define ARM_VARIANT &arm_ext_v6z
14970 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
14971
14972 #undef ARM_VARIANT
14973 #define ARM_VARIANT &arm_ext_v6t2
14974 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
14975 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
14976 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14977 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14978
14979 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14980 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
14981 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
14982 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
14983
14984 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14985 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14986 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14987 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14988
14989 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
14990 UT(cbz, b100, 2, (RR, EXP), t_cbz),
14991 /* ARM does not really have an IT instruction, so always allow it. */
14992 #undef ARM_VARIANT
14993 #define ARM_VARIANT &arm_ext_v1
14994 TUE(it, 0, bf08, 1, (COND), it, t_it),
14995 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
14996 TUE(ite, 0, bf04, 1, (COND), it, t_it),
14997 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
14998 TUE(itet, 0, bf06, 1, (COND), it, t_it),
14999 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15000 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15001 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15002 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15003 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15004 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15005 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15006 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15007 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15008 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15009
15010 /* Thumb2 only instructions. */
15011 #undef ARM_VARIANT
15012 #define ARM_VARIANT NULL
15013
15014 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15015 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15016 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15017 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15018
15019 /* Thumb-2 hardware division instructions (R and M profiles only). */
15020 #undef THUMB_VARIANT
15021 #define THUMB_VARIANT &arm_ext_div
15022 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15023 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15024
15025 /* ARM V7 instructions. */
15026 #undef ARM_VARIANT
15027 #define ARM_VARIANT &arm_ext_v7
15028 #undef THUMB_VARIANT
15029 #define THUMB_VARIANT &arm_ext_v7
15030 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15031 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15032 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15033 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15034 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15035
15036 #undef ARM_VARIANT
15037 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15038 cCE(wfs, e200110, 1, (RR), rd),
15039 cCE(rfs, e300110, 1, (RR), rd),
15040 cCE(wfc, e400110, 1, (RR), rd),
15041 cCE(rfc, e500110, 1, (RR), rd),
15042
15043 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15044 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15045 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15046 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15047
15048 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15049 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15050 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15051 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15052
15053 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15054 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15055 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15056 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15057 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15058 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15059 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15060 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15061 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15062 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15063 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15064 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15065
15066 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15067 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15068 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15069 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15070 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15071 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15072 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15073 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15074 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15075 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15076 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15077 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15078
15079 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15080 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15081 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15082 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15083 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15084 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15085 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15086 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15087 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15088 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15089 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15090 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15091
15092 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15093 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15094 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15095 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15096 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15097 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15098 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15099 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15100 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15101 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15102 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15103 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15104
15105 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15106 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15107 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15108 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15109 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15110 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15111 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15112 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15113 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15114 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15115 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15116 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15117
15118 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15119 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15120 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15121 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15122 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15123 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15124 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15125 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15126 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15127 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15128 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15129 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15130
15131 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15132 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15133 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15134 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15135 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15136 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15137 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15138 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15139 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15140 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15141 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15142 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15143
15144 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15145 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15146 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15147 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15148 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15149 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15150 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15151 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15152 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15153 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15154 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15155 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
15156
15157 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15158 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15159 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15160 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15161 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15162 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15163 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15164 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15165 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15166 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15167 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15168 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15169
15170 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15171 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15172 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15173 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15174 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15175 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15176 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15177 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15178 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15179 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15180 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15181 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15182
15183 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15184 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15185 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15186 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15187 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15188 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15189 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15190 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15191 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15192 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15193 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15194 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15195
15196 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15197 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15198 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15199 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15200 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15201 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15202 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15203 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15204 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15205 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15206 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15207 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15208
15209 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15210 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15211 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15212 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15213 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15214 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15215 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15216 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15217 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15218 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15219 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15220 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15221
15222 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15223 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15224 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15225 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15226 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15227 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15228 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15229 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15230 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15231 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15232 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15233 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15234
15235 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15236 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15237 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15238 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15239 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15240 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15241 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15242 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15243 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15244 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15245 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15246 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15247
15248 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15249 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15250 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15251 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15252 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15253 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15254 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15255 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15256 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15257 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15258 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15259 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15260
15261 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15262 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15263 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15264 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15265 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15266 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15267 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15268 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15269 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15270 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15271 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15272 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15273
15274 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15275 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15276 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15277 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15278 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15279 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15280 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15281 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15282 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15283 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15284 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15285 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15286
15287 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15288 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15289 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15290 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15291 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15292 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15293 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15294 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15295 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15296 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15297 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15298 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15299
15300 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15301 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15302 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15303 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15304 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15305 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15306 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15307 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15308 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15309 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15310 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15311 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15312
15313 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15314 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15315 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15316 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15317 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15318 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15319 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15320 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15321 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15322 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15323 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15324 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15325
15326 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15327 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15328 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15329 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15330 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15331 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15332 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15333 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15334 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15335 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15336 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15337 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15338
15339 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15340 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15341 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15342 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15343 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15344 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15345 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15346 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15347 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15348 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15349 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15350 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15351
15352 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15353 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15354 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15355 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15356 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15357 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15358 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15359 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15360 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15361 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15362 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15363 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15364
15365 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15366 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15367 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15368 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15369 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15370 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15371 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15372 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15373 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15374 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15375 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15376 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15377
15378 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15379 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15380 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15381 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15382 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15383 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15384 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15385 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15386 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15387 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15388 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15389 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15390
15391 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15392 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15393 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15394 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15395 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15396 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15397 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15398 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15399 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15400 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15401 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15402 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15403
15404 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15405 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15406 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15407 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15408 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15409 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15410 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15411 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15412 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15413 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15414 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15415 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15416
15417 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15418 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15419 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15420 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15421 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15422 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15423 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15424 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15425 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15426 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15427 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15428 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15429
15430 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15431 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15432 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15433 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15434
15435 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15436 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15437 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15438 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15439 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15440 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15441 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15442 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15443 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15444 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15445 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15446 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15447
15448 /* The implementation of the FIX instruction is broken on some
15449 assemblers, in that it accepts a precision specifier as well as a
15450 rounding specifier, despite the fact that this is meaningless.
15451 To be more compatible, we accept it as well, though of course it
15452 does not set any bits. */
15453 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15454 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15455 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15456 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15457 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15458 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15459 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15460 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15461 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15462 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15463 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15464 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15465 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15466
15467 /* Instructions that were new with the real FPA, call them V2. */
15468 #undef ARM_VARIANT
15469 #define ARM_VARIANT &fpu_fpa_ext_v2
15470 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15471 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15472 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15473 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15474 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15475 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15476
15477 #undef ARM_VARIANT
15478 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15479 /* Moves and type conversions. */
15480 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15481 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15482 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15483 cCE(fmstat, ef1fa10, 0, (), noargs),
15484 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15485 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15486 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15487 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15488 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15489 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15490 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15491 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15492
15493 /* Memory operations. */
15494 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15495 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15496 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15497 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15498 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15499 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15500 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15501 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15502 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15503 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15504 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15505 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15506 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15507 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15508 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15509 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15510 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15511 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15512
15513 /* Monadic operations. */
15514 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15515 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15516 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15517
15518 /* Dyadic operations. */
15519 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15520 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15521 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15522 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15523 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15524 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15525 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15526 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15527 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15528
15529 /* Comparisons. */
15530 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15531 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15532 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15533 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15534
15535 #undef ARM_VARIANT
15536 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15537 /* Moves and type conversions. */
15538 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15539 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15540 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15541 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15542 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15543 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15544 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15545 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15546 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15547 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15548 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15549 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15550 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15551
15552 /* Memory operations. */
15553 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15554 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15555 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15556 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15557 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15558 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15559 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15560 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15561 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15562 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15563
15564 /* Monadic operations. */
15565 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15566 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15567 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15568
15569 /* Dyadic operations. */
15570 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15571 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15572 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15573 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15574 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15575 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15576 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15577 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15578 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15579
15580 /* Comparisons. */
15581 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15582 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15583 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15584 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15585
15586 #undef ARM_VARIANT
15587 #define ARM_VARIANT &fpu_vfp_ext_v2
15588 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15589 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15590 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15591 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15592
15593 /* Instructions which may belong to either the Neon or VFP instruction sets.
15594 Individual encoder functions perform additional architecture checks. */
15595 #undef ARM_VARIANT
15596 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15597 #undef THUMB_VARIANT
15598 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15599 /* These mnemonics are unique to VFP. */
15600 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15601 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15602 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15603 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15604 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15605 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15606 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15607 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15608 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15609 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15610
15611 /* Mnemonics shared by Neon and VFP. */
15612 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15613 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15614 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15615
15616 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15617 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15618
15619 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15620 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15621
15622 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15623 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15624 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15625 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15626 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15627 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15628 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15629 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15630
15631 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15632
15633 /* NOTE: All VMOV encoding is special-cased! */
15634 NCE(vmov, 0, 1, (VMOV), neon_mov),
15635 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15636
15637 #undef THUMB_VARIANT
15638 #define THUMB_VARIANT &fpu_neon_ext_v1
15639 #undef ARM_VARIANT
15640 #define ARM_VARIANT &fpu_neon_ext_v1
15641 /* Data processing with three registers of the same length. */
15642 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15643 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15644 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15645 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15646 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15647 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15648 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15649 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15650 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15651 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15652 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15653 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15654 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15655 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15656 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15657 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15658 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15659 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15660 /* If not immediate, fall back to neon_dyadic_i64_su.
15661 shl_imm should accept I8 I16 I32 I64,
15662 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15663 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15664 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15665 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15666 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15667 /* Logic ops, types optional & ignored. */
15668 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15669 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15670 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15671 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15672 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15673 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15674 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15675 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15676 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15677 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15678 /* Bitfield ops, untyped. */
15679 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15680 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15681 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15682 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15683 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15684 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15685 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15686 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15687 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15688 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15689 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15690 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15691 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15692 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15693 back to neon_dyadic_if_su. */
15694 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15695 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15696 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15697 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15698 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15699 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15700 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15701 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15702 /* Comparison. Type I8 I16 I32 F32. */
15703 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15704 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15705 /* As above, D registers only. */
15706 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15707 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15708 /* Int and float variants, signedness unimportant. */
15709 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15710 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15711 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15712 /* Add/sub take types I8 I16 I32 I64 F32. */
15713 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15714 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15715 /* vtst takes sizes 8, 16, 32. */
15716 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15717 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15718 /* VMUL takes I8 I16 I32 F32 P8. */
15719 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15720 /* VQD{R}MULH takes S16 S32. */
15721 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15722 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15723 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15724 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15725 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15726 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15727 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15728 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15729 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15730 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15731 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15732 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15733 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15734 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15735 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15736 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15737
15738 /* Two address, int/float. Types S8 S16 S32 F32. */
15739 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15740 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15741
15742 /* Data processing with two registers and a shift amount. */
15743 /* Right shifts, and variants with rounding.
15744 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15745 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15746 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15747 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15748 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15749 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15750 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15751 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15752 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15753 /* Shift and insert. Sizes accepted 8 16 32 64. */
15754 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15755 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15756 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15757 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15758 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15759 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15760 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15761 /* Right shift immediate, saturating & narrowing, with rounding variants.
15762 Types accepted S16 S32 S64 U16 U32 U64. */
15763 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15764 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15765 /* As above, unsigned. Types accepted S16 S32 S64. */
15766 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15767 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15768 /* Right shift narrowing. Types accepted I16 I32 I64. */
15769 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15770 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15771 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15772 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15773 /* CVT with optional immediate for fixed-point variant. */
15774 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15775
15776 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15777 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15778
15779 /* Data processing, three registers of different lengths. */
15780 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15781 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15782 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15783 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15784 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15785 /* If not scalar, fall back to neon_dyadic_long.
15786 Vector types as above, scalar types S16 S32 U16 U32. */
15787 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15788 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15789 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15790 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15791 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15792 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15793 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15794 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15795 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15796 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15797 /* Saturating doubling multiplies. Types S16 S32. */
15798 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15799 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15800 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15801 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15802 S16 S32 U16 U32. */
15803 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15804
15805 /* Extract. Size 8. */
15806 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15807 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15808
15809 /* Two registers, miscellaneous. */
15810 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15811 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15812 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15813 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15814 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15815 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15816 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15817 /* Vector replicate. Sizes 8 16 32. */
15818 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15819 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15820 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15821 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15822 /* VMOVN. Types I16 I32 I64. */
15823 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15824 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15825 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15826 /* VQMOVUN. Types S16 S32 S64. */
15827 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15828 /* VZIP / VUZP. Sizes 8 16 32. */
15829 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15830 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15831 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15832 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15833 /* VQABS / VQNEG. Types S8 S16 S32. */
15834 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15835 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15836 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15837 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15838 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15839 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15840 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15841 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15842 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15843 /* Reciprocal estimates. Types U32 F32. */
15844 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15845 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15846 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15847 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15848 /* VCLS. Types S8 S16 S32. */
15849 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15850 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15851 /* VCLZ. Types I8 I16 I32. */
15852 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15853 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15854 /* VCNT. Size 8. */
15855 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15856 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15857 /* Two address, untyped. */
15858 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15859 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15860 /* VTRN. Sizes 8 16 32. */
15861 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15862 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15863
15864 /* Table lookup. Size 8. */
15865 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15866 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15867
15868 #undef THUMB_VARIANT
15869 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15870 #undef ARM_VARIANT
15871 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15872 /* Neon element/structure load/store. */
15873 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15874 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15875 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15876 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15877 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15878 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15879 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15880 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15881
15882 #undef THUMB_VARIANT
15883 #define THUMB_VARIANT &fpu_vfp_ext_v3
15884 #undef ARM_VARIANT
15885 #define ARM_VARIANT &fpu_vfp_ext_v3
15886 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15887 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15888 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15889 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15890 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15891 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15892 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15893 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15894 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15895 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15896 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15897 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15898 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15899 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15900 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15901 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15902 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15903 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15904
15905 #undef THUMB_VARIANT
15906 #undef ARM_VARIANT
15907 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15908 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15909 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15910 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15911 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15912 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15913 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15914 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
15915 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
15916
15917 #undef ARM_VARIANT
15918 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15919 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
15920 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
15921 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
15922 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
15923 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
15924 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
15925 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
15926 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
15927 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
15928 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15929 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15930 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15931 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15932 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15933 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15934 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15935 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15936 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15937 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
15938 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
15939 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15940 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15941 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15942 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15943 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15944 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15945 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
15946 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
15947 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
15948 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
15949 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
15950 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
15951 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
15952 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
15953 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
15954 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
15955 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
15956 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15957 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15958 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15959 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15960 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15961 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15962 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15963 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15964 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15965 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
15966 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15967 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15968 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15969 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15970 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15971 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15972 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15973 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15974 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15975 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15976 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15977 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15978 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15979 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15980 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15981 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15982 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15983 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15984 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15985 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15986 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15987 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15988 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15989 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15990 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15991 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15992 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15993 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15994 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15995 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15996 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15997 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15998 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15999 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16000 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16001 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16002 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16003 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16004 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16005 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16006 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16007 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16008 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16009 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16010 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16011 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16012 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16013 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16014 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16015 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16016 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16017 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16018 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16019 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16020 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16021 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16022 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16023 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16024 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16025 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16026 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16027 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16028 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16029 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16030 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16031 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16032 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16033 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16034 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16035 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16036 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16037 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16038 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16039 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16040 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16041 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16042 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16043 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16044 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16045 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16046 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16047 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16048 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16049 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16050 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16051 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16052 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16053 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16054 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16055 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16056 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16057 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16058 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16059 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16060 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16061 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16062 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16063 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16064 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16065 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16066 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16067 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16068 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16069 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16070 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16071 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16072 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16073 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16074 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16075 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16076 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16077 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16078 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16079 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16080 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16081
16082 #undef ARM_VARIANT
16083 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16084 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16085 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16086 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16087 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16088 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16089 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16090 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16091 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16092 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16093 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16094 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16095 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16096 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16097 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16098 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16099 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16100 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16101 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16102 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16103 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16104 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16105 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16106 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16107 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16108 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16109 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16110 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16111 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16112 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16113 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16114 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16115 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16116 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16117 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16118 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16119 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16120 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16121 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16122 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16123 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16124 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16125 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16126 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16127 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16128 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16129 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16130 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16131 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16132 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16133 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16134 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16135 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16136 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16137 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16138 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16139 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16140 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16141
16142 #undef ARM_VARIANT
16143 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16144 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16145 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16146 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16147 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16148 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16149 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16150 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16151 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16152 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16153 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16154 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16155 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16156 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16157 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16158 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16159 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16160 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16161 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16162 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16163 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16164 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16165 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16166 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16167 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16168 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16169 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16170 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16171 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16172 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16173 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16174 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16175 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16176 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16177 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16178 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16179 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16180 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16181 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16182 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16183 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16184 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16185 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16186 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16187 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16188 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16189 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16190 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16191 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16192 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16193 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16194 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16195 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16196 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16197 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16198 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16199 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16200 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16201 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16202 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16203 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16204 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16205 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16206 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16207 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16208 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16209 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16210 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16211 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16212 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16213 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16214 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16215 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16216 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16217 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16218 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16219 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16220 };
16221 #undef ARM_VARIANT
16222 #undef THUMB_VARIANT
16223 #undef TCE
16224 #undef TCM
16225 #undef TUE
16226 #undef TUF
16227 #undef TCC
16228 #undef cCE
16229 #undef cCL
16230 #undef C3E
16231 #undef CE
16232 #undef CM
16233 #undef UE
16234 #undef UF
16235 #undef UT
16236 #undef NUF
16237 #undef nUF
16238 #undef NCE
16239 #undef nCE
16240 #undef OPS0
16241 #undef OPS1
16242 #undef OPS2
16243 #undef OPS3
16244 #undef OPS4
16245 #undef OPS5
16246 #undef OPS6
16247 #undef do_0
16248 \f
16249 /* MD interface: bits in the object file. */
16250
16251 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16252 for use in the a.out file, and stores them in the array pointed to by buf.
16253 This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating point numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */
16257
16258 void
16259 md_number_to_chars (char * buf, valueT val, int n)
16260 {
16261 if (target_big_endian)
16262 number_to_chars_bigendian (buf, val, n);
16263 else
16264 number_to_chars_littleendian (buf, val, n);
16265 }
16266
16267 static valueT
16268 md_chars_to_number (char * buf, int n)
16269 {
16270 valueT result = 0;
16271 unsigned char * where = (unsigned char *) buf;
16272
16273 if (target_big_endian)
16274 {
16275 while (n--)
16276 {
16277 result <<= 8;
16278 result |= (*where++ & 255);
16279 }
16280 }
16281 else
16282 {
16283 while (n--)
16284 {
16285 result <<= 8;
16286 result |= (where[n] & 255);
16287 }
16288 }
16289
16290 return result;
16291 }
16292
16293 /* MD interface: Sections. */
16294
16295 /* Estimate the size of a frag before relaxing. Assume everything fits in
16296 2 bytes. */
16297
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start with the narrow (2-byte) encoding; arm_relax_frag may later
     grow fr_var to 4 if the operand does not fit.  */
  fragp->fr_var = 2;
  return 2;
}
16305
16306 /* Convert a machine dependent frag. */
16307
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;		/* The original 16-bit instruction.  */
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being converted lives at the end of the frag's
     fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Recover the 16-bit opcode originally emitted so its register
     fields can be transplanted into the 32-bit encoding.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol) {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
  } else {
      exp.X_op = O_constant;
  }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype records which relaxable mnemonic this frag holds
     (see arm_relax_frag).  fr_var == 4 means the wide form won.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Opcodes with top nibble 4 or 9 keep Rd in bits 8-10
	     (the PC/SP-relative forms); the others keep Rd in
	     bits 0-2 and Rn in bits 3-5.  Move those fields into
	     the 32-bit encoding's register positions.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* Extra encoding bits required by the 32-bit immediate
	     offset form.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the synthetic PC-relative load form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant the Rd field (bits 4-7 of the old opcode).  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset of the narrow form.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the register in the destination field;
	     cmp/cmn carry it in the Rn field 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field over to the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move Rd and Rn from the narrow encoding into place.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting form, which takes a
	     different immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort();
    }
  /* Attach a fixup covering the whole (now final-sized) instruction so
     the operand is filled in at write/relocation time.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
16471
16472 /* Return the size of a relaxable immediate operand instruction.
16473 SHIFT and SIZE specify the form of the allowable immediate. */
16474 static int
16475 relax_immediate (fragS *fragp, int size, int shift)
16476 {
16477 offsetT offset;
16478 offsetT mask;
16479 offsetT low;
16480
16481 /* ??? Should be able to do better than this. */
16482 if (fragp->fr_symbol)
16483 return 4;
16484
16485 low = (1 << shift) - 1;
16486 mask = (1 << (shift + size)) - (1 << shift);
16487 offset = fragp->fr_offset;
16488 /* Force misaligned offsets to 32-bit variant. */
16489 if (offset & low)
16490 return -4;
16491 if (offset & ~mask)
16492 return 4;
16493 return 2;
16494 }
16495
16496 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16497 load. */
16498 static int
16499 relax_adr (fragS *fragp, asection *sec)
16500 {
16501 addressT addr;
16502 offsetT val;
16503
16504 /* Assume worst case for symbols not known to be in the same section. */
16505 if (!S_IS_DEFINED(fragp->fr_symbol)
16506 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16507 return 4;
16508
16509 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16510 addr = fragp->fr_address + fragp->fr_fix;
16511 addr = (addr + 4) & ~3;
16512 /* Fix the insn as the 4-byte version if the target address is not
16513 sufficiently aligned. This is prevents an infinite loop when two
16514 instructions have contradictory range/alignment requirements. */
16515 if (val & 3)
16516 return -4;
16517 val -= addr;
16518 if (val < 0 || val > 1020)
16519 return 4;
16520 return 2;
16521 }
16522
16523 /* Return the size of a relaxable add/sub immediate instruction. */
16524 static int
16525 relax_addsub (fragS *fragp, asection *sec)
16526 {
16527 char *buf;
16528 int op;
16529
16530 buf = fragp->fr_literal + fragp->fr_fix;
16531 op = bfd_get_16(sec->owner, buf);
16532 if ((op & 0xf) == ((op >> 4) & 0xf))
16533 return relax_immediate (fragp, 8, 0);
16534 else
16535 return relax_immediate (fragp, 3, 0);
16536 }
16537
16538
16539 /* Return the size of a relaxable branch instruction. BITS is the
16540 size of the offset field in the narrow instruction. */
16541
16542 static int
16543 relax_branch (fragS *fragp, asection *sec, int bits)
16544 {
16545 addressT addr;
16546 offsetT val;
16547 offsetT limit;
16548
16549 /* Assume worst case for symbols not known to be in the same section. */
16550 if (!S_IS_DEFINED(fragp->fr_symbol)
16551 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16552 return 4;
16553
16554 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16555 addr = fragp->fr_address + fragp->fr_fix + 4;
16556 val -= addr;
16557
16558 /* Offset is a signed value *2 */
16559 limit = 1 << bits;
16560 if (val >= limit || val < -limit)
16561 return 4;
16562 return 2;
16563 }
16564
16565
16566 /* Relax a machine dependent frag. This returns the amount by which
16567 the current size of the frag should change. */
16568
16569 int
16570 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
16571 {
16572 int oldsize;
16573 int newsize;
16574
16575 oldsize = fragp->fr_var;
16576 switch (fragp->fr_subtype)
16577 {
16578 case T_MNEM_ldr_pc2:
16579 newsize = relax_adr(fragp, sec);
16580 break;
16581 case T_MNEM_ldr_pc:
16582 case T_MNEM_ldr_sp:
16583 case T_MNEM_str_sp:
16584 newsize = relax_immediate(fragp, 8, 2);
16585 break;
16586 case T_MNEM_ldr:
16587 case T_MNEM_str:
16588 newsize = relax_immediate(fragp, 5, 2);
16589 break;
16590 case T_MNEM_ldrh:
16591 case T_MNEM_strh:
16592 newsize = relax_immediate(fragp, 5, 1);
16593 break;
16594 case T_MNEM_ldrb:
16595 case T_MNEM_strb:
16596 newsize = relax_immediate(fragp, 5, 0);
16597 break;
16598 case T_MNEM_adr:
16599 newsize = relax_adr(fragp, sec);
16600 break;
16601 case T_MNEM_mov:
16602 case T_MNEM_movs:
16603 case T_MNEM_cmp:
16604 case T_MNEM_cmn:
16605 newsize = relax_immediate(fragp, 8, 0);
16606 break;
16607 case T_MNEM_b:
16608 newsize = relax_branch(fragp, sec, 11);
16609 break;
16610 case T_MNEM_bcond:
16611 newsize = relax_branch(fragp, sec, 8);
16612 break;
16613 case T_MNEM_add_sp:
16614 case T_MNEM_add_pc:
16615 newsize = relax_immediate (fragp, 8, 2);
16616 break;
16617 case T_MNEM_inc_sp:
16618 case T_MNEM_dec_sp:
16619 newsize = relax_immediate (fragp, 7, 2);
16620 break;
16621 case T_MNEM_addi:
16622 case T_MNEM_addis:
16623 case T_MNEM_subi:
16624 case T_MNEM_subis:
16625 newsize = relax_addsub (fragp, sec);
16626 break;
16627 default:
16628 abort();
16629 }
16630 if (newsize < 0)
16631 {
16632 fragp->fr_var = -newsize;
16633 md_convert_frag (sec->owner, sec, fragp);
16634 frag_wane(fragp);
16635 return -(newsize + oldsize);
16636 }
16637 fragp->fr_var = newsize;
16638 return newsize - oldsize;
16639 }
16640
16641 /* Round up a section size to the appropriate boundary. */
16642
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  /* For ELF and COFF the size is returned unchanged; BFD handles the
     alignment.  */
  return size;
}
16664
16665 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16666 of an rs_align_code fragment. */
16667
void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings in both byte orders: ARM `mov r0, r0' (0xe1a00000)
     and Thumb `mov r8, r8' (0x46c0).  */
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };

  int bytes, fix, noop_size;
  char * p;
  const char * noop;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes between this frag's fixed part and the
     next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* Only the final, sub-maximum remainder needs to be written here;
     NOTE(review): this relies on MAX_MEM_FOR_RS_ALIGN_CODE being of
     the form 2^n - 1 for the mask to work.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* tc_frag_data is nonzero for Thumb frags (set in arm_init_frag from
     thumb_mode); pick the matching no-op for the mode and byte order.  */
  if (fragP->tc_frag_data)
    {
      if (target_big_endian)
	noop = thumb_bigend_noop;
      else
	noop = thumb_noop;
      noop_size = sizeof (thumb_noop);
    }
  else
    {
      if (target_big_endian)
	noop = arm_bigend_noop;
      else
	noop = arm_noop;
      noop_size = sizeof (arm_noop);
    }

  /* Pad any leading misalignment with zero bytes so the no-ops land on
     an instruction boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  /* Fill the rest with whole no-op instructions.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
}
16726
16727 /* Called from md_do_align. Used to create an alignment
16728 frag in a code section. */
16729
16730 void
16731 arm_frag_align_code (int n, int max)
16732 {
16733 char * p;
16734
16735 /* We assume that there will never be a requirement
16736 to support alignments greater than 32 bytes. */
16737 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16738 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16739
16740 p = frag_var (rs_align_code,
16741 MAX_MEM_FOR_RS_ALIGN_CODE,
16742 1,
16743 (relax_substateT) max,
16744 (symbolS *) NULL,
16745 (offsetT) n,
16746 (char *) NULL);
16747 *p = 0;
16748 }
16749
16750 /* Perform target specific initialisation of a frag. */
16751
void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area; this is
     read later by arm_handle_align to choose the right no-op fill.  */
  fragP->tc_frag_data = thumb_mode;
}
16758
16759 #ifdef OBJ_ELF
16760 /* When we change sections we need to issue a new mapping symbol. */
16761
void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  /* Reload the per-section state for the newly current section.
     NOTE(review): mapstate and marked_pr_dependency are file-scope
     variables declared outside this view — presumably the mapping
     symbol state and the EABI personality-routine dependency flag;
     confirm against their declarations.  */
  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}
16786
16787 int
16788 arm_elf_section_type (const char * str, size_t len)
16789 {
16790 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16791 return SHT_ARM_EXIDX;
16792
16793 return -1;
16794 }
16795 \f
16796 /* Code to deal with unwinding tables. */
16797
16798 static void add_unwind_adjustsp (offsetT);
16799
/* Generate any deferred unwind frame offset.  */
16801
16802 static void
16803 flush_pending_unwind (void)
16804 {
16805 offsetT offset;
16806
16807 offset = unwind.pending_offset;
16808 unwind.pending_offset = 0;
16809 if (offset != 0)
16810 add_unwind_adjustsp (offset);
16811 }
16812
16813 /* Add an opcode to this list for this function. Two-byte opcodes should
16814 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16815 order. */
16816
16817 static void
16818 add_unwind_opcode (valueT op, int length)
16819 {
16820 /* Add any deferred stack adjustment. */
16821 if (unwind.pending_offset)
16822 flush_pending_unwind ();
16823
16824 unwind.sp_restored = 0;
16825
16826 if (unwind.opcode_count + length > unwind.opcode_alloc)
16827 {
16828 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16829 if (unwind.opcodes)
16830 unwind.opcodes = xrealloc (unwind.opcodes,
16831 unwind.opcode_alloc);
16832 else
16833 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16834 }
16835 while (length > 0)
16836 {
16837 length--;
16838 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16839 op >>= 8;
16840 unwind.opcode_count++;
16841 }
16842 }
16843
16844 /* Add unwind opcodes to adjust the stack pointer. */
16845
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      /* The 0x204 bias and >> 2 scaling match the opcode's encoding of
	 the adjustment (word-scaled, offset past the short forms).  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one byte emitted.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but the last.  */
	  n++;
	}
      /* Add the insn.  */
      /* Emit uleb128 bytes high-index first, then the 0xb2 opcode; the
	 whole list is later reversed, restoring the proper order.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (maximum decrement) opcodes
	 until the remainder fits a single 0x40-based opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
16905
16906 /* Finish the list of unwind opcodes for this function. */
16907 static void
16908 finish_unwind_opcodes (void)
16909 {
16910 valueT op;
16911
16912 if (unwind.fp_used)
16913 {
16914 /* Adjust sp as necessary. */
16915 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
16916 flush_pending_unwind ();
16917
16918 /* After restoring sp from the frame pointer. */
16919 op = 0x90 | unwind.fp_reg;
16920 add_unwind_opcode (op, 1);
16921 }
16922 else
16923 flush_pending_unwind ();
16924 }
16925
16926
16927 /* Start an exception table entry. If idx is nonzero this is an index table
16928 entry. */
16929
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  /* Index tables use the .ARM.exidx prefix and section type; unwind
     info tables use .ARM.extab (plain PROGBITS).  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's
     name, with plain ".text" contributing nothing.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad ("Group section `%s' has no group signature",
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
17002
17003
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
17007
17008 static valueT
17009 create_unwind_entry (int have_data)
17010 {
17011 int size;
17012 addressT where;
17013 char *ptr;
17014 /* The current word of data. */
17015 valueT data;
17016 /* The number of bytes left in this word. */
17017 int n;
17018
17019 finish_unwind_opcodes ();
17020
17021 /* Remember the current text section. */
17022 unwind.saved_seg = now_seg;
17023 unwind.saved_subseg = now_subseg;
17024
17025 start_unwind_section (now_seg, 0);
17026
17027 if (unwind.personality_routine == NULL)
17028 {
17029 if (unwind.personality_index == -2)
17030 {
17031 if (have_data)
17032 as_bad (_("handerdata in cantunwind frame"));
17033 return 1; /* EXIDX_CANTUNWIND. */
17034 }
17035
17036 /* Use a default personality routine if none is specified. */
17037 if (unwind.personality_index == -1)
17038 {
17039 if (unwind.opcode_count > 3)
17040 unwind.personality_index = 1;
17041 else
17042 unwind.personality_index = 0;
17043 }
17044
17045 /* Space for the personality routine entry. */
17046 if (unwind.personality_index == 0)
17047 {
17048 if (unwind.opcode_count > 3)
17049 as_bad (_("too many unwind opcodes for personality routine 0"));
17050
17051 if (!have_data)
17052 {
17053 /* All the data is inline in the index table. */
17054 data = 0x80;
17055 n = 3;
17056 while (unwind.opcode_count > 0)
17057 {
17058 unwind.opcode_count--;
17059 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17060 n--;
17061 }
17062
17063 /* Pad with "finish" opcodes. */
17064 while (n--)
17065 data = (data << 8) | 0xb0;
17066
17067 return data;
17068 }
17069 size = 0;
17070 }
17071 else
17072 /* We get two opcodes "free" in the first word. */
17073 size = unwind.opcode_count - 2;
17074 }
17075 else
17076 /* An extra byte is required for the opcode count. */
17077 size = unwind.opcode_count + 1;
17078
17079 size = (size + 3) >> 2;
17080 if (size > 0xff)
17081 as_bad (_("too many unwind opcodes"));
17082
17083 frag_align (2, 0, 0);
17084 record_alignment (now_seg, 2);
17085 unwind.table_entry = expr_build_dot ();
17086
17087 /* Allocate the table entry. */
17088 ptr = frag_more ((size << 2) + 4);
17089 where = frag_now_fix () - ((size << 2) + 4);
17090
17091 switch (unwind.personality_index)
17092 {
17093 case -1:
17094 /* ??? Should this be a PLT generating relocation? */
17095 /* Custom personality routine. */
17096 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17097 BFD_RELOC_ARM_PREL31);
17098
17099 where += 4;
17100 ptr += 4;
17101
17102 /* Set the first byte to the number of additional words. */
17103 data = size - 1;
17104 n = 3;
17105 break;
17106
17107 /* ABI defined personality routines. */
17108 case 0:
17109 /* Three opcodes bytes are packed into the first word. */
17110 data = 0x80;
17111 n = 3;
17112 break;
17113
17114 case 1:
17115 case 2:
17116 /* The size and first two opcode bytes go in the first word. */
17117 data = ((0x80 + unwind.personality_index) << 8) | size;
17118 n = 2;
17119 break;
17120
17121 default:
17122 /* Should never happen. */
17123 abort ();
17124 }
17125
17126 /* Pack the opcodes into words (MSB first), reversing the list at the same
17127 time. */
17128 while (unwind.opcode_count > 0)
17129 {
17130 if (n == 0)
17131 {
17132 md_number_to_chars (ptr, data, 4);
17133 ptr += 4;
17134 n = 4;
17135 data = 0;
17136 }
17137 unwind.opcode_count--;
17138 n--;
17139 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17140 }
17141
17142 /* Finish off the last word. */
17143 if (n < 4)
17144 {
17145 /* Pad with "finish" opcodes. */
17146 while (n--)
17147 data = (data << 8) | 0xb0;
17148
17149 md_number_to_chars (ptr, data, 4);
17150 }
17151
17152 if (!have_data)
17153 {
17154 /* Add an empty descriptor if there is no user-specified data. */
17155 ptr = frag_more (4);
17156 md_number_to_chars (ptr, 0, 4);
17157 }
17158
17159 return 0;
17160 }
17161
17162
17163 /* Initialize the DWARF-2 unwind information for this procedure. */
17164
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with no offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
17170 #endif /* OBJ_ELF */
17171
17172 /* Convert REGNAME to a DWARF-2 register number. */
17173
17174 int
17175 tc_arm_regname_to_dw2regnum (char *regname)
17176 {
17177 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17178
17179 if (reg == FAIL)
17180 return -1;
17181
17182 return reg;
17183 }
17184
17185 #ifdef TE_PE
17186 void
17187 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17188 {
17189 expressionS expr;
17190
17191 expr.X_op = O_secrel;
17192 expr.X_add_symbol = symbol;
17193 expr.X_add_number = 0;
17194 emit_expr (&expr, size);
17195 }
17196 #endif
17197
17198 /* MD interface: Symbol and relocation handling. */
17199
17200 /* Return the address within the segment that a PC-relative fixup is
17201 relative to. For ARM, PC-relative fixups applied to instructions
17202 are generally relative to the location of the fixup plus 8 bytes.
17203 Thumb branches are offset by 4, and Thumb loads relative to PC
17204 require special handling. */
17205
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* The address of the fixup itself within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;

  /* The per-reloc adjustment below adds the architectural pipeline
     offset (+8 for ARM, +4 for Thumb) and any instruction-specific
     rounding.  */
  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
         discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
         see fixup_segment() in write.c
         The S_IS_EXTERNAL test handles the case of global symbols.
         Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
17291
17292 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17293 Otherwise we have no need to default values of symbols. */
17294
17295 symbolS *
17296 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17297 {
17298 #ifdef OBJ_ELF
17299 if (name[0] == '_' && name[1] == 'G'
17300 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17301 {
17302 if (!GOT_symbol)
17303 {
17304 if (symbol_find (name))
17305 as_bad ("GOT already in the symbol table");
17306
17307 GOT_symbol = symbol_new (name, undefined_section,
17308 (valueT) 0, & zero_address_frag);
17309 }
17310
17311 return GOT_symbol;
17312 }
17313 #endif
17314
17315 return 0;
17316 }
17317
17318 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17319 computed as two separate immediate values, added together. We
17320 already know that this value cannot be computed by just one ARM
17321 instruction. */
17322
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Search each even rotation for one that brings a non-zero byte
     into the bottom eight bits; that byte becomes the low-part
     immediate.  The remaining bits must then fit in a single byte
     somewhere above it, which becomes the high-part immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* Second byte is adjacent; anything beyond 16 bits cannot
	       be covered by this rotation - try the next one.  */
	    if (a & ~ 0xffff)
	      continue;
	    /* The rotate field holds I/2 in bits 8-11, hence the
	       << 7 on the (even) rotation count.  */
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* The low byte is non-zero and bytes 1 and 2 are clear, so
	       the remainder must live in the top byte.  */
	    assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  /* VAL was zero, which callers have already ruled out as encodable
     by a single instruction.  */
  return FAIL;
}
17356
17357 static int
17358 validate_offset_imm (unsigned int val, int hwse)
17359 {
17360 if ((hwse && val > 255) || val > 4095)
17361 return FAIL;
17362 return val;
17363 }
17364
17365 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17366 negative immediate constant by altering the instruction. A bit of
17367 a hack really.
17368 MOV <-> MVN
17369 AND <-> BIC
17370 ADC <-> SBC
17371 by inverting the second operand, and
17372 ADD <-> SUB
17373 CMP <-> CMN
17374 by negating the second operand. */
17375
17376 static int
17377 negate_data_op (unsigned long * instruction,
17378 unsigned long value)
17379 {
17380 int op, new_inst;
17381 unsigned long negated, inverted;
17382
17383 negated = encode_arm_immediate (-value);
17384 inverted = encode_arm_immediate (~value);
17385
17386 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17387 switch (op)
17388 {
17389 /* First negates. */
17390 case OPCODE_SUB: /* ADD <-> SUB */
17391 new_inst = OPCODE_ADD;
17392 value = negated;
17393 break;
17394
17395 case OPCODE_ADD:
17396 new_inst = OPCODE_SUB;
17397 value = negated;
17398 break;
17399
17400 case OPCODE_CMP: /* CMP <-> CMN */
17401 new_inst = OPCODE_CMN;
17402 value = negated;
17403 break;
17404
17405 case OPCODE_CMN:
17406 new_inst = OPCODE_CMP;
17407 value = negated;
17408 break;
17409
17410 /* Now Inverted ops. */
17411 case OPCODE_MOV: /* MOV <-> MVN */
17412 new_inst = OPCODE_MVN;
17413 value = inverted;
17414 break;
17415
17416 case OPCODE_MVN:
17417 new_inst = OPCODE_MOV;
17418 value = inverted;
17419 break;
17420
17421 case OPCODE_AND: /* AND <-> BIC */
17422 new_inst = OPCODE_BIC;
17423 value = inverted;
17424 break;
17425
17426 case OPCODE_BIC:
17427 new_inst = OPCODE_AND;
17428 value = inverted;
17429 break;
17430
17431 case OPCODE_ADC: /* ADC <-> SBC */
17432 new_inst = OPCODE_SBC;
17433 value = inverted;
17434 break;
17435
17436 case OPCODE_SBC:
17437 new_inst = OPCODE_ADC;
17438 value = inverted;
17439 break;
17440
17441 /* We cannot do anything. */
17442 default:
17443 return FAIL;
17444 }
17445
17446 if (value == (unsigned) FAIL)
17447 return FAIL;
17448
17449 *instruction &= OPCODE_MASK;
17450 *instruction |= new_inst << DATA_OP_SHIFT;
17451 return value;
17452 }
17453
17454 /* Like negate_data_op, but for Thumb-2. */
17455
17456 static unsigned int
17457 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17458 {
17459 int op, new_inst;
17460 int rd;
17461 unsigned int negated, inverted;
17462
17463 negated = encode_thumb32_immediate (-value);
17464 inverted = encode_thumb32_immediate (~value);
17465
17466 rd = (*instruction >> 8) & 0xf;
17467 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17468 switch (op)
17469 {
17470 /* ADD <-> SUB. Includes CMP <-> CMN. */
17471 case T2_OPCODE_SUB:
17472 new_inst = T2_OPCODE_ADD;
17473 value = negated;
17474 break;
17475
17476 case T2_OPCODE_ADD:
17477 new_inst = T2_OPCODE_SUB;
17478 value = negated;
17479 break;
17480
17481 /* ORR <-> ORN. Includes MOV <-> MVN. */
17482 case T2_OPCODE_ORR:
17483 new_inst = T2_OPCODE_ORN;
17484 value = inverted;
17485 break;
17486
17487 case T2_OPCODE_ORN:
17488 new_inst = T2_OPCODE_ORR;
17489 value = inverted;
17490 break;
17491
17492 /* AND <-> BIC. TST has no inverted equivalent. */
17493 case T2_OPCODE_AND:
17494 new_inst = T2_OPCODE_BIC;
17495 if (rd == 15)
17496 value = FAIL;
17497 else
17498 value = inverted;
17499 break;
17500
17501 case T2_OPCODE_BIC:
17502 new_inst = T2_OPCODE_AND;
17503 value = inverted;
17504 break;
17505
17506 /* ADC <-> SBC */
17507 case T2_OPCODE_ADC:
17508 new_inst = T2_OPCODE_SBC;
17509 value = inverted;
17510 break;
17511
17512 case T2_OPCODE_SBC:
17513 new_inst = T2_OPCODE_ADC;
17514 value = inverted;
17515 break;
17516
17517 /* We cannot do anything. */
17518 default:
17519 return FAIL;
17520 }
17521
17522 if (value == (unsigned int)FAIL)
17523 return FAIL;
17524
17525 *instruction &= T2_OPCODE_MASK;
17526 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17527 return value;
17528 }
17529
17530 /* Read a 32-bit thumb instruction from buf. */
17531 static unsigned long
17532 get_thumb32_insn (char * buf)
17533 {
17534 unsigned long insn;
17535 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17536 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17537
17538 return insn;
17539 }
17540
17541
17542 /* We usually want to set the low bit on the address of thumb function
17543 symbols. In particular .word foo - . should have the low bit set.
17544 Generic code tries to fold the difference of two symbols to
17545 a constant. Prevent this and force a relocation when the first symbols
17546 is a thumb function. */
17547 int
17548 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17549 {
17550 if (op == O_subtract
17551 && l->X_op == O_symbol
17552 && r->X_op == O_symbol
17553 && THUMB_IS_FUNC (l->X_add_symbol))
17554 {
17555 l->X_op = O_subtract;
17556 l->X_op_symbol = r->X_add_symbol;
17557 l->X_add_number -= r->X_add_number;
17558 return 1;
17559 }
17560 /* Process as normal. */
17561 return 0;
17562 }
17563
17564 void
17565 md_apply_fix (fixS * fixP,
17566 valueT * valP,
17567 segT seg)
17568 {
17569 offsetT value = * valP;
17570 offsetT newval;
17571 unsigned int newimm;
17572 unsigned long temp;
17573 int sign;
17574 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17575
17576 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17577
17578 /* Note whether this will delete the relocation. */
17579
17580 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17581 fixP->fx_done = 1;
17582
17583 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17584 consistency with the behavior on 32-bit hosts. Remember value
17585 for emit_reloc. */
17586 value &= 0xffffffff;
17587 value ^= 0x80000000;
17588 value -= 0x80000000;
17589
17590 *valP = value;
17591 fixP->fx_addnumber = value;
17592
17593 /* Same treatment for fixP->fx_offset. */
17594 fixP->fx_offset &= 0xffffffff;
17595 fixP->fx_offset ^= 0x80000000;
17596 fixP->fx_offset -= 0x80000000;
17597
17598 switch (fixP->fx_r_type)
17599 {
17600 case BFD_RELOC_NONE:
17601 /* This will need to go in the object file. */
17602 fixP->fx_done = 0;
17603 break;
17604
17605 case BFD_RELOC_ARM_IMMEDIATE:
17606 /* We claim that this fixup has been processed here,
17607 even if in fact we generate an error because we do
17608 not have a reloc for it, so tc_gen_reloc will reject it. */
17609 fixP->fx_done = 1;
17610
17611 if (fixP->fx_addsy
17612 && ! S_IS_DEFINED (fixP->fx_addsy))
17613 {
17614 as_bad_where (fixP->fx_file, fixP->fx_line,
17615 _("undefined symbol %s used as an immediate value"),
17616 S_GET_NAME (fixP->fx_addsy));
17617 break;
17618 }
17619
17620 newimm = encode_arm_immediate (value);
17621 temp = md_chars_to_number (buf, INSN_SIZE);
17622
17623 /* If the instruction will fail, see if we can fix things up by
17624 changing the opcode. */
17625 if (newimm == (unsigned int) FAIL
17626 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17627 {
17628 as_bad_where (fixP->fx_file, fixP->fx_line,
17629 _("invalid constant (%lx) after fixup"),
17630 (unsigned long) value);
17631 break;
17632 }
17633
17634 newimm |= (temp & 0xfffff000);
17635 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17636 break;
17637
17638 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17639 {
17640 unsigned int highpart = 0;
17641 unsigned int newinsn = 0xe1a00000; /* nop. */
17642
17643 newimm = encode_arm_immediate (value);
17644 temp = md_chars_to_number (buf, INSN_SIZE);
17645
17646 /* If the instruction will fail, see if we can fix things up by
17647 changing the opcode. */
17648 if (newimm == (unsigned int) FAIL
17649 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17650 {
17651 /* No ? OK - try using two ADD instructions to generate
17652 the value. */
17653 newimm = validate_immediate_twopart (value, & highpart);
17654
17655 /* Yes - then make sure that the second instruction is
17656 also an add. */
17657 if (newimm != (unsigned int) FAIL)
17658 newinsn = temp;
17659 /* Still No ? Try using a negated value. */
17660 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17661 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17662 /* Otherwise - give up. */
17663 else
17664 {
17665 as_bad_where (fixP->fx_file, fixP->fx_line,
17666 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17667 (long) value);
17668 break;
17669 }
17670
17671 /* Replace the first operand in the 2nd instruction (which
17672 is the PC) with the destination register. We have
17673 already added in the PC in the first instruction and we
17674 do not want to do it again. */
17675 newinsn &= ~ 0xf0000;
17676 newinsn |= ((newinsn & 0x0f000) << 4);
17677 }
17678
17679 newimm |= (temp & 0xfffff000);
17680 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17681
17682 highpart |= (newinsn & 0xfffff000);
17683 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17684 }
17685 break;
17686
17687 case BFD_RELOC_ARM_OFFSET_IMM:
17688 if (!fixP->fx_done && seg->use_rela_p)
17689 value = 0;
17690
17691 case BFD_RELOC_ARM_LITERAL:
17692 sign = value >= 0;
17693
17694 if (value < 0)
17695 value = - value;
17696
17697 if (validate_offset_imm (value, 0) == FAIL)
17698 {
17699 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17700 as_bad_where (fixP->fx_file, fixP->fx_line,
17701 _("invalid literal constant: pool needs to be closer"));
17702 else
17703 as_bad_where (fixP->fx_file, fixP->fx_line,
17704 _("bad immediate value for offset (%ld)"),
17705 (long) value);
17706 break;
17707 }
17708
17709 newval = md_chars_to_number (buf, INSN_SIZE);
17710 newval &= 0xff7ff000;
17711 newval |= value | (sign ? INDEX_UP : 0);
17712 md_number_to_chars (buf, newval, INSN_SIZE);
17713 break;
17714
17715 case BFD_RELOC_ARM_OFFSET_IMM8:
17716 case BFD_RELOC_ARM_HWLITERAL:
17717 sign = value >= 0;
17718
17719 if (value < 0)
17720 value = - value;
17721
17722 if (validate_offset_imm (value, 1) == FAIL)
17723 {
17724 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17725 as_bad_where (fixP->fx_file, fixP->fx_line,
17726 _("invalid literal constant: pool needs to be closer"));
17727 else
17728 as_bad (_("bad immediate value for half-word offset (%ld)"),
17729 (long) value);
17730 break;
17731 }
17732
17733 newval = md_chars_to_number (buf, INSN_SIZE);
17734 newval &= 0xff7ff0f0;
17735 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17736 md_number_to_chars (buf, newval, INSN_SIZE);
17737 break;
17738
17739 case BFD_RELOC_ARM_T32_OFFSET_U8:
17740 if (value < 0 || value > 1020 || value % 4 != 0)
17741 as_bad_where (fixP->fx_file, fixP->fx_line,
17742 _("bad immediate value for offset (%ld)"), (long) value);
17743 value /= 4;
17744
17745 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17746 newval |= value;
17747 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17748 break;
17749
17750 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17751 /* This is a complicated relocation used for all varieties of Thumb32
17752 load/store instruction with immediate offset:
17753
17754 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17755 *4, optional writeback(W)
17756 (doubleword load/store)
17757
17758 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17759 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17760 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17761 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17762 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17763
17764 Uppercase letters indicate bits that are already encoded at
17765 this point. Lowercase letters are our problem. For the
17766 second block of instructions, the secondary opcode nybble
17767 (bits 8..11) is present, and bit 23 is zero, even if this is
17768 a PC-relative operation. */
17769 newval = md_chars_to_number (buf, THUMB_SIZE);
17770 newval <<= 16;
17771 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17772
17773 if ((newval & 0xf0000000) == 0xe0000000)
17774 {
17775 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17776 if (value >= 0)
17777 newval |= (1 << 23);
17778 else
17779 value = -value;
17780 if (value % 4 != 0)
17781 {
17782 as_bad_where (fixP->fx_file, fixP->fx_line,
17783 _("offset not a multiple of 4"));
17784 break;
17785 }
17786 value /= 4;
17787 if (value > 0xff)
17788 {
17789 as_bad_where (fixP->fx_file, fixP->fx_line,
17790 _("offset out of range"));
17791 break;
17792 }
17793 newval &= ~0xff;
17794 }
17795 else if ((newval & 0x000f0000) == 0x000f0000)
17796 {
17797 /* PC-relative, 12-bit offset. */
17798 if (value >= 0)
17799 newval |= (1 << 23);
17800 else
17801 value = -value;
17802 if (value > 0xfff)
17803 {
17804 as_bad_where (fixP->fx_file, fixP->fx_line,
17805 _("offset out of range"));
17806 break;
17807 }
17808 newval &= ~0xfff;
17809 }
17810 else if ((newval & 0x00000100) == 0x00000100)
17811 {
17812 /* Writeback: 8-bit, +/- offset. */
17813 if (value >= 0)
17814 newval |= (1 << 9);
17815 else
17816 value = -value;
17817 if (value > 0xff)
17818 {
17819 as_bad_where (fixP->fx_file, fixP->fx_line,
17820 _("offset out of range"));
17821 break;
17822 }
17823 newval &= ~0xff;
17824 }
17825 else if ((newval & 0x00000f00) == 0x00000e00)
17826 {
17827 /* T-instruction: positive 8-bit offset. */
17828 if (value < 0 || value > 0xff)
17829 {
17830 as_bad_where (fixP->fx_file, fixP->fx_line,
17831 _("offset out of range"));
17832 break;
17833 }
17834 newval &= ~0xff;
17835 newval |= value;
17836 }
17837 else
17838 {
17839 /* Positive 12-bit or negative 8-bit offset. */
17840 int limit;
17841 if (value >= 0)
17842 {
17843 newval |= (1 << 23);
17844 limit = 0xfff;
17845 }
17846 else
17847 {
17848 value = -value;
17849 limit = 0xff;
17850 }
17851 if (value > limit)
17852 {
17853 as_bad_where (fixP->fx_file, fixP->fx_line,
17854 _("offset out of range"));
17855 break;
17856 }
17857 newval &= ~limit;
17858 }
17859
17860 newval |= value;
17861 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17862 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17863 break;
17864
17865 case BFD_RELOC_ARM_SHIFT_IMM:
17866 newval = md_chars_to_number (buf, INSN_SIZE);
17867 if (((unsigned long) value) > 32
17868 || (value == 32
17869 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17870 {
17871 as_bad_where (fixP->fx_file, fixP->fx_line,
17872 _("shift expression is too large"));
17873 break;
17874 }
17875
17876 if (value == 0)
17877 /* Shifts of zero must be done as lsl. */
17878 newval &= ~0x60;
17879 else if (value == 32)
17880 value = 0;
17881 newval &= 0xfffff07f;
17882 newval |= (value & 0x1f) << 7;
17883 md_number_to_chars (buf, newval, INSN_SIZE);
17884 break;
17885
17886 case BFD_RELOC_ARM_T32_IMMEDIATE:
17887 case BFD_RELOC_ARM_T32_ADD_IMM:
17888 case BFD_RELOC_ARM_T32_IMM12:
17889 case BFD_RELOC_ARM_T32_ADD_PC12:
17890 /* We claim that this fixup has been processed here,
17891 even if in fact we generate an error because we do
17892 not have a reloc for it, so tc_gen_reloc will reject it. */
17893 fixP->fx_done = 1;
17894
17895 if (fixP->fx_addsy
17896 && ! S_IS_DEFINED (fixP->fx_addsy))
17897 {
17898 as_bad_where (fixP->fx_file, fixP->fx_line,
17899 _("undefined symbol %s used as an immediate value"),
17900 S_GET_NAME (fixP->fx_addsy));
17901 break;
17902 }
17903
17904 newval = md_chars_to_number (buf, THUMB_SIZE);
17905 newval <<= 16;
17906 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
17907
17908 newimm = FAIL;
17909 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17910 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17911 {
17912 newimm = encode_thumb32_immediate (value);
17913 if (newimm == (unsigned int) FAIL)
17914 newimm = thumb32_negate_data_op (&newval, value);
17915 }
17916 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
17917 && newimm == (unsigned int) FAIL)
17918 {
	  /* Turn add/sub into addw/subw. */
17920 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17921 newval = (newval & 0xfeffffff) | 0x02000000;
17922
17923 /* 12 bit immediate for addw/subw. */
17924 if (value < 0)
17925 {
17926 value = -value;
17927 newval ^= 0x00a00000;
17928 }
17929 if (value > 0xfff)
17930 newimm = (unsigned int) FAIL;
17931 else
17932 newimm = value;
17933 }
17934
17935 if (newimm == (unsigned int)FAIL)
17936 {
17937 as_bad_where (fixP->fx_file, fixP->fx_line,
17938 _("invalid constant (%lx) after fixup"),
17939 (unsigned long) value);
17940 break;
17941 }
17942
17943 newval |= (newimm & 0x800) << 15;
17944 newval |= (newimm & 0x700) << 4;
17945 newval |= (newimm & 0x0ff);
17946
17947 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
17948 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
17949 break;
17950
17951 case BFD_RELOC_ARM_SMC:
17952 if (((unsigned long) value) > 0xffff)
17953 as_bad_where (fixP->fx_file, fixP->fx_line,
17954 _("invalid smc expression"));
17955 newval = md_chars_to_number (buf, INSN_SIZE);
17956 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
17957 md_number_to_chars (buf, newval, INSN_SIZE);
17958 break;
17959
17960 case BFD_RELOC_ARM_SWI:
17961 if (fixP->tc_fix_data != 0)
17962 {
17963 if (((unsigned long) value) > 0xff)
17964 as_bad_where (fixP->fx_file, fixP->fx_line,
17965 _("invalid swi expression"));
17966 newval = md_chars_to_number (buf, THUMB_SIZE);
17967 newval |= value;
17968 md_number_to_chars (buf, newval, THUMB_SIZE);
17969 }
17970 else
17971 {
17972 if (((unsigned long) value) > 0x00ffffff)
17973 as_bad_where (fixP->fx_file, fixP->fx_line,
17974 _("invalid swi expression"));
17975 newval = md_chars_to_number (buf, INSN_SIZE);
17976 newval |= value;
17977 md_number_to_chars (buf, newval, INSN_SIZE);
17978 }
17979 break;
17980
17981 case BFD_RELOC_ARM_MULTI:
17982 if (((unsigned long) value) > 0xffff)
17983 as_bad_where (fixP->fx_file, fixP->fx_line,
17984 _("invalid expression in load/store multiple"));
17985 newval = value | md_chars_to_number (buf, INSN_SIZE);
17986 md_number_to_chars (buf, newval, INSN_SIZE);
17987 break;
17988
17989 #ifdef OBJ_ELF
17990 case BFD_RELOC_ARM_PCREL_CALL:
17991 newval = md_chars_to_number (buf, INSN_SIZE);
17992 if ((newval & 0xf0000000) == 0xf0000000)
17993 temp = 1;
17994 else
17995 temp = 3;
17996 goto arm_branch_common;
17997
17998 case BFD_RELOC_ARM_PCREL_JUMP:
17999 case BFD_RELOC_ARM_PLT32:
18000 #endif
18001 case BFD_RELOC_ARM_PCREL_BRANCH:
18002 temp = 3;
18003 goto arm_branch_common;
18004
18005 case BFD_RELOC_ARM_PCREL_BLX:
18006 temp = 1;
18007 arm_branch_common:
18008 /* We are going to store value (shifted right by two) in the
18009 instruction, in a 24 bit, signed field. Bits 26 through 32 either
18010 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
	 also be clear. */
18012 if (value & temp)
18013 as_bad_where (fixP->fx_file, fixP->fx_line,
18014 _("misaligned branch destination"));
18015 if ((value & (offsetT)0xfe000000) != (offsetT)0
18016 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18017 as_bad_where (fixP->fx_file, fixP->fx_line,
18018 _("branch out of range"));
18019
18020 if (fixP->fx_done || !seg->use_rela_p)
18021 {
18022 newval = md_chars_to_number (buf, INSN_SIZE);
18023 newval |= (value >> 2) & 0x00ffffff;
18024 /* Set the H bit on BLX instructions. */
18025 if (temp == 1)
18026 {
18027 if (value & 2)
18028 newval |= 0x01000000;
18029 else
18030 newval &= ~0x01000000;
18031 }
18032 md_number_to_chars (buf, newval, INSN_SIZE);
18033 }
18034 break;
18035
18036 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18037 /* CBZ can only branch forward. */
18038 if (value & ~0x7e)
18039 as_bad_where (fixP->fx_file, fixP->fx_line,
18040 _("branch out of range"));
18041
18042 if (fixP->fx_done || !seg->use_rela_p)
18043 {
18044 newval = md_chars_to_number (buf, THUMB_SIZE);
18045 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18046 md_number_to_chars (buf, newval, THUMB_SIZE);
18047 }
18048 break;
18049
18050 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18051 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18052 as_bad_where (fixP->fx_file, fixP->fx_line,
18053 _("branch out of range"));
18054
18055 if (fixP->fx_done || !seg->use_rela_p)
18056 {
18057 newval = md_chars_to_number (buf, THUMB_SIZE);
18058 newval |= (value & 0x1ff) >> 1;
18059 md_number_to_chars (buf, newval, THUMB_SIZE);
18060 }
18061 break;
18062
18063 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18064 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18065 as_bad_where (fixP->fx_file, fixP->fx_line,
18066 _("branch out of range"));
18067
18068 if (fixP->fx_done || !seg->use_rela_p)
18069 {
18070 newval = md_chars_to_number (buf, THUMB_SIZE);
18071 newval |= (value & 0xfff) >> 1;
18072 md_number_to_chars (buf, newval, THUMB_SIZE);
18073 }
18074 break;
18075
18076 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18077 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18078 as_bad_where (fixP->fx_file, fixP->fx_line,
18079 _("conditional branch out of range"));
18080
18081 if (fixP->fx_done || !seg->use_rela_p)
18082 {
18083 offsetT newval2;
18084 addressT S, J1, J2, lo, hi;
18085
18086 S = (value & 0x00100000) >> 20;
18087 J2 = (value & 0x00080000) >> 19;
18088 J1 = (value & 0x00040000) >> 18;
18089 hi = (value & 0x0003f000) >> 12;
18090 lo = (value & 0x00000ffe) >> 1;
18091
18092 newval = md_chars_to_number (buf, THUMB_SIZE);
18093 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18094 newval |= (S << 10) | hi;
18095 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18096 md_number_to_chars (buf, newval, THUMB_SIZE);
18097 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18098 }
18099 break;
18100
18101 case BFD_RELOC_THUMB_PCREL_BLX:
18102 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18103 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18104 as_bad_where (fixP->fx_file, fixP->fx_line,
18105 _("branch out of range"));
18106
18107 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18108 /* For a BLX instruction, make sure that the relocation is rounded up
18109 to a word boundary. This follows the semantics of the instruction
18110 which specifies that bit 1 of the target address will come from bit
18111 1 of the base address. */
18112 value = (value + 1) & ~ 1;
18113
18114 if (fixP->fx_done || !seg->use_rela_p)
18115 {
18116 offsetT newval2;
18117
18118 newval = md_chars_to_number (buf, THUMB_SIZE);
18119 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18120 newval |= (value & 0x7fffff) >> 12;
18121 newval2 |= (value & 0xfff) >> 1;
18122 md_number_to_chars (buf, newval, THUMB_SIZE);
18123 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18124 }
18125 break;
18126
18127 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18128 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18129 as_bad_where (fixP->fx_file, fixP->fx_line,
18130 _("branch out of range"));
18131
18132 if (fixP->fx_done || !seg->use_rela_p)
18133 {
18134 offsetT newval2;
18135 addressT S, I1, I2, lo, hi;
18136
18137 S = (value & 0x01000000) >> 24;
18138 I1 = (value & 0x00800000) >> 23;
18139 I2 = (value & 0x00400000) >> 22;
18140 hi = (value & 0x003ff000) >> 12;
18141 lo = (value & 0x00000ffe) >> 1;
18142
18143 I1 = !(I1 ^ S);
18144 I2 = !(I2 ^ S);
18145
18146 newval = md_chars_to_number (buf, THUMB_SIZE);
18147 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18148 newval |= (S << 10) | hi;
18149 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18150 md_number_to_chars (buf, newval, THUMB_SIZE);
18151 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18152 }
18153 break;
18154
18155 case BFD_RELOC_8:
18156 if (fixP->fx_done || !seg->use_rela_p)
18157 md_number_to_chars (buf, value, 1);
18158 break;
18159
18160 case BFD_RELOC_16:
18161 if (fixP->fx_done || !seg->use_rela_p)
18162 md_number_to_chars (buf, value, 2);
18163 break;
18164
18165 #ifdef OBJ_ELF
18166 case BFD_RELOC_ARM_TLS_GD32:
18167 case BFD_RELOC_ARM_TLS_LE32:
18168 case BFD_RELOC_ARM_TLS_IE32:
18169 case BFD_RELOC_ARM_TLS_LDM32:
18170 case BFD_RELOC_ARM_TLS_LDO32:
18171 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18172 /* fall through */
18173
18174 case BFD_RELOC_ARM_GOT32:
18175 case BFD_RELOC_ARM_GOTOFF:
18176 case BFD_RELOC_ARM_TARGET2:
18177 if (fixP->fx_done || !seg->use_rela_p)
18178 md_number_to_chars (buf, 0, 4);
18179 break;
18180 #endif
18181
18182 case BFD_RELOC_RVA:
18183 case BFD_RELOC_32:
18184 case BFD_RELOC_ARM_TARGET1:
18185 case BFD_RELOC_ARM_ROSEGREL32:
18186 case BFD_RELOC_ARM_SBREL32:
18187 case BFD_RELOC_32_PCREL:
18188 #ifdef TE_PE
18189 case BFD_RELOC_32_SECREL:
18190 #endif
18191 if (fixP->fx_done || !seg->use_rela_p)
18192 #ifdef TE_WINCE
18193 /* For WinCE we only do this for pcrel fixups. */
18194 if (fixP->fx_done || fixP->fx_pcrel)
18195 #endif
18196 md_number_to_chars (buf, value, 4);
18197 break;
18198
18199 #ifdef OBJ_ELF
18200 case BFD_RELOC_ARM_PREL31:
18201 if (fixP->fx_done || !seg->use_rela_p)
18202 {
18203 newval = md_chars_to_number (buf, 4) & 0x80000000;
18204 if ((value ^ (value >> 1)) & 0x40000000)
18205 {
18206 as_bad_where (fixP->fx_file, fixP->fx_line,
18207 _("rel31 relocation overflow"));
18208 }
18209 newval |= value & 0x7fffffff;
18210 md_number_to_chars (buf, newval, 4);
18211 }
18212 break;
18213 #endif
18214
18215 case BFD_RELOC_ARM_CP_OFF_IMM:
18216 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18217 if (value < -1023 || value > 1023 || (value & 3))
18218 as_bad_where (fixP->fx_file, fixP->fx_line,
18219 _("co-processor offset out of range"));
18220 cp_off_common:
18221 sign = value >= 0;
18222 if (value < 0)
18223 value = -value;
18224 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18225 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18226 newval = md_chars_to_number (buf, INSN_SIZE);
18227 else
18228 newval = get_thumb32_insn (buf);
18229 newval &= 0xff7fff00;
18230 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18231 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18232 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18233 md_number_to_chars (buf, newval, INSN_SIZE);
18234 else
18235 put_thumb32_insn (buf, newval);
18236 break;
18237
18238 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18239 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18240 if (value < -255 || value > 255)
18241 as_bad_where (fixP->fx_file, fixP->fx_line,
18242 _("co-processor offset out of range"));
18243 value *= 4;
18244 goto cp_off_common;
18245
18246 case BFD_RELOC_ARM_THUMB_OFFSET:
18247 newval = md_chars_to_number (buf, THUMB_SIZE);
18248 /* Exactly what ranges, and where the offset is inserted depends
18249 on the type of instruction, we can establish this from the
18250 top 4 bits. */
18251 switch (newval >> 12)
18252 {
18253 case 4: /* PC load. */
18254 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18255 forced to zero for these loads; md_pcrel_from has already
18256 compensated for this. */
18257 if (value & 3)
18258 as_bad_where (fixP->fx_file, fixP->fx_line,
18259 _("invalid offset, target not word aligned (0x%08lX)"),
18260 (((unsigned long) fixP->fx_frag->fr_address
18261 + (unsigned long) fixP->fx_where) & ~3)
18262 + (unsigned long) value);
18263
18264 if (value & ~0x3fc)
18265 as_bad_where (fixP->fx_file, fixP->fx_line,
18266 _("invalid offset, value too big (0x%08lX)"),
18267 (long) value);
18268
18269 newval |= value >> 2;
18270 break;
18271
18272 case 9: /* SP load/store. */
18273 if (value & ~0x3fc)
18274 as_bad_where (fixP->fx_file, fixP->fx_line,
18275 _("invalid offset, value too big (0x%08lX)"),
18276 (long) value);
18277 newval |= value >> 2;
18278 break;
18279
18280 case 6: /* Word load/store. */
18281 if (value & ~0x7c)
18282 as_bad_where (fixP->fx_file, fixP->fx_line,
18283 _("invalid offset, value too big (0x%08lX)"),
18284 (long) value);
18285 newval |= value << 4; /* 6 - 2. */
18286 break;
18287
18288 case 7: /* Byte load/store. */
18289 if (value & ~0x1f)
18290 as_bad_where (fixP->fx_file, fixP->fx_line,
18291 _("invalid offset, value too big (0x%08lX)"),
18292 (long) value);
18293 newval |= value << 6;
18294 break;
18295
18296 case 8: /* Halfword load/store. */
18297 if (value & ~0x3e)
18298 as_bad_where (fixP->fx_file, fixP->fx_line,
18299 _("invalid offset, value too big (0x%08lX)"),
18300 (long) value);
18301 newval |= value << 5; /* 6 - 1. */
18302 break;
18303
18304 default:
18305 as_bad_where (fixP->fx_file, fixP->fx_line,
18306 "Unable to process relocation for thumb opcode: %lx",
18307 (unsigned long) newval);
18308 break;
18309 }
18310 md_number_to_chars (buf, newval, THUMB_SIZE);
18311 break;
18312
18313 case BFD_RELOC_ARM_THUMB_ADD:
18314 /* This is a complicated relocation, since we use it for all of
18315 the following immediate relocations:
18316
18317 3bit ADD/SUB
18318 8bit ADD/SUB
18319 9bit ADD/SUB SP word-aligned
18320 10bit ADD PC/SP word-aligned
18321
18322 The type of instruction being processed is encoded in the
18323 instruction field:
18324
18325 0x8000 SUB
18326 0x00F0 Rd
18327 0x000F Rs
18328 */
18329 newval = md_chars_to_number (buf, THUMB_SIZE);
18330 {
18331 int rd = (newval >> 4) & 0xf;
18332 int rs = newval & 0xf;
18333 int subtract = !!(newval & 0x8000);
18334
18335 /* Check for HI regs, only very restricted cases allowed:
18336 Adjusting SP, and using PC or SP to get an address. */
18337 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18338 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18339 as_bad_where (fixP->fx_file, fixP->fx_line,
18340 _("invalid Hi register with immediate"));
18341
18342 /* If value is negative, choose the opposite instruction. */
18343 if (value < 0)
18344 {
18345 value = -value;
18346 subtract = !subtract;
18347 if (value < 0)
18348 as_bad_where (fixP->fx_file, fixP->fx_line,
18349 _("immediate value out of range"));
18350 }
18351
18352 if (rd == REG_SP)
18353 {
18354 if (value & ~0x1fc)
18355 as_bad_where (fixP->fx_file, fixP->fx_line,
18356 _("invalid immediate for stack address calculation"));
18357 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18358 newval |= value >> 2;
18359 }
18360 else if (rs == REG_PC || rs == REG_SP)
18361 {
18362 if (subtract || value & ~0x3fc)
18363 as_bad_where (fixP->fx_file, fixP->fx_line,
18364 _("invalid immediate for address calculation (value = 0x%08lX)"),
18365 (unsigned long) value);
18366 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18367 newval |= rd << 8;
18368 newval |= value >> 2;
18369 }
18370 else if (rs == rd)
18371 {
18372 if (value & ~0xff)
18373 as_bad_where (fixP->fx_file, fixP->fx_line,
18374 _("immediate value out of range"));
18375 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18376 newval |= (rd << 8) | value;
18377 }
18378 else
18379 {
18380 if (value & ~0x7)
18381 as_bad_where (fixP->fx_file, fixP->fx_line,
18382 _("immediate value out of range"));
18383 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18384 newval |= rd | (rs << 3) | (value << 6);
18385 }
18386 }
18387 md_number_to_chars (buf, newval, THUMB_SIZE);
18388 break;
18389
18390 case BFD_RELOC_ARM_THUMB_IMM:
18391 newval = md_chars_to_number (buf, THUMB_SIZE);
18392 if (value < 0 || value > 255)
18393 as_bad_where (fixP->fx_file, fixP->fx_line,
18394 _("invalid immediate: %ld is too large"),
18395 (long) value);
18396 newval |= value;
18397 md_number_to_chars (buf, newval, THUMB_SIZE);
18398 break;
18399
18400 case BFD_RELOC_ARM_THUMB_SHIFT:
18401 /* 5bit shift value (0..32). LSL cannot take 32. */
18402 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18403 temp = newval & 0xf800;
18404 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18405 as_bad_where (fixP->fx_file, fixP->fx_line,
18406 _("invalid shift value: %ld"), (long) value);
18407 /* Shifts of zero must be encoded as LSL. */
18408 if (value == 0)
18409 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18410 /* Shifts of 32 are encoded as zero. */
18411 else if (value == 32)
18412 value = 0;
18413 newval |= value << 6;
18414 md_number_to_chars (buf, newval, THUMB_SIZE);
18415 break;
18416
18417 case BFD_RELOC_VTABLE_INHERIT:
18418 case BFD_RELOC_VTABLE_ENTRY:
18419 fixP->fx_done = 0;
18420 return;
18421
18422 case BFD_RELOC_ARM_MOVW:
18423 case BFD_RELOC_ARM_MOVT:
18424 case BFD_RELOC_ARM_THUMB_MOVW:
18425 case BFD_RELOC_ARM_THUMB_MOVT:
18426 if (fixP->fx_done || !seg->use_rela_p)
18427 {
18428 /* REL format relocations are limited to a 16-bit addend. */
18429 if (!fixP->fx_done)
18430 {
18431 if (value < -0x1000 || value > 0xffff)
18432 as_bad_where (fixP->fx_file, fixP->fx_line,
18433 _("offset too big"));
18434 }
18435 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18436 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18437 {
18438 value >>= 16;
18439 }
18440
18441 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18442 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18443 {
18444 newval = get_thumb32_insn (buf);
18445 newval &= 0xfbf08f00;
18446 newval |= (value & 0xf000) << 4;
18447 newval |= (value & 0x0800) << 15;
18448 newval |= (value & 0x0700) << 4;
18449 newval |= (value & 0x00ff);
18450 put_thumb32_insn (buf, newval);
18451 }
18452 else
18453 {
18454 newval = md_chars_to_number (buf, 4);
18455 newval &= 0xfff0f000;
18456 newval |= value & 0x0fff;
18457 newval |= (value & 0xf000) << 4;
18458 md_number_to_chars (buf, newval, 4);
18459 }
18460 }
18461 return;
18462
18463 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18464 case BFD_RELOC_ARM_ALU_PC_G0:
18465 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18466 case BFD_RELOC_ARM_ALU_PC_G1:
18467 case BFD_RELOC_ARM_ALU_PC_G2:
18468 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18469 case BFD_RELOC_ARM_ALU_SB_G0:
18470 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18471 case BFD_RELOC_ARM_ALU_SB_G1:
18472 case BFD_RELOC_ARM_ALU_SB_G2:
18473 assert (!fixP->fx_done);
18474 if (!seg->use_rela_p)
18475 {
18476 bfd_vma insn;
18477 bfd_vma encoded_addend;
18478 bfd_vma addend_abs = abs (value);
18479
18480 /* Check that the absolute value of the addend can be
18481 expressed as an 8-bit constant plus a rotation. */
18482 encoded_addend = encode_arm_immediate (addend_abs);
18483 if (encoded_addend == (unsigned int) FAIL)
18484 as_bad_where (fixP->fx_file, fixP->fx_line,
18485 _("the offset 0x%08lX is not representable"),
18486 addend_abs);
18487
18488 /* Extract the instruction. */
18489 insn = md_chars_to_number (buf, INSN_SIZE);
18490
18491 /* If the addend is positive, use an ADD instruction.
18492 Otherwise use a SUB. Take care not to destroy the S bit. */
18493 insn &= 0xff1fffff;
18494 if (value < 0)
18495 insn |= 1 << 22;
18496 else
18497 insn |= 1 << 23;
18498
18499 /* Place the encoded addend into the first 12 bits of the
18500 instruction. */
18501 insn &= 0xfffff000;
18502 insn |= encoded_addend;
18503
18504 /* Update the instruction. */
18505 md_number_to_chars (buf, insn, INSN_SIZE);
18506 }
18507 break;
18508
18509 case BFD_RELOC_ARM_LDR_PC_G0:
18510 case BFD_RELOC_ARM_LDR_PC_G1:
18511 case BFD_RELOC_ARM_LDR_PC_G2:
18512 case BFD_RELOC_ARM_LDR_SB_G0:
18513 case BFD_RELOC_ARM_LDR_SB_G1:
18514 case BFD_RELOC_ARM_LDR_SB_G2:
18515 assert (!fixP->fx_done);
18516 if (!seg->use_rela_p)
18517 {
18518 bfd_vma insn;
18519 bfd_vma addend_abs = abs (value);
18520
18521 /* Check that the absolute value of the addend can be
18522 encoded in 12 bits. */
18523 if (addend_abs >= 0x1000)
18524 as_bad_where (fixP->fx_file, fixP->fx_line,
18525 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18526 addend_abs);
18527
18528 /* Extract the instruction. */
18529 insn = md_chars_to_number (buf, INSN_SIZE);
18530
18531 /* If the addend is negative, clear bit 23 of the instruction.
18532 Otherwise set it. */
18533 if (value < 0)
18534 insn &= ~(1 << 23);
18535 else
18536 insn |= 1 << 23;
18537
18538 /* Place the absolute value of the addend into the first 12 bits
18539 of the instruction. */
18540 insn &= 0xfffff000;
18541 insn |= addend_abs;
18542
18543 /* Update the instruction. */
18544 md_number_to_chars (buf, insn, INSN_SIZE);
18545 }
18546 break;
18547
18548 case BFD_RELOC_ARM_LDRS_PC_G0:
18549 case BFD_RELOC_ARM_LDRS_PC_G1:
18550 case BFD_RELOC_ARM_LDRS_PC_G2:
18551 case BFD_RELOC_ARM_LDRS_SB_G0:
18552 case BFD_RELOC_ARM_LDRS_SB_G1:
18553 case BFD_RELOC_ARM_LDRS_SB_G2:
18554 assert (!fixP->fx_done);
18555 if (!seg->use_rela_p)
18556 {
18557 bfd_vma insn;
18558 bfd_vma addend_abs = abs (value);
18559
18560 /* Check that the absolute value of the addend can be
18561 encoded in 8 bits. */
18562 if (addend_abs >= 0x100)
18563 as_bad_where (fixP->fx_file, fixP->fx_line,
18564 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18565 addend_abs);
18566
18567 /* Extract the instruction. */
18568 insn = md_chars_to_number (buf, INSN_SIZE);
18569
18570 /* If the addend is negative, clear bit 23 of the instruction.
18571 Otherwise set it. */
18572 if (value < 0)
18573 insn &= ~(1 << 23);
18574 else
18575 insn |= 1 << 23;
18576
18577 /* Place the first four bits of the absolute value of the addend
18578 into the first 4 bits of the instruction, and the remaining
18579 four into bits 8 .. 11. */
18580 insn &= 0xfffff0f0;
18581 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18582
18583 /* Update the instruction. */
18584 md_number_to_chars (buf, insn, INSN_SIZE);
18585 }
18586 break;
18587
18588 case BFD_RELOC_ARM_LDC_PC_G0:
18589 case BFD_RELOC_ARM_LDC_PC_G1:
18590 case BFD_RELOC_ARM_LDC_PC_G2:
18591 case BFD_RELOC_ARM_LDC_SB_G0:
18592 case BFD_RELOC_ARM_LDC_SB_G1:
18593 case BFD_RELOC_ARM_LDC_SB_G2:
18594 assert (!fixP->fx_done);
18595 if (!seg->use_rela_p)
18596 {
18597 bfd_vma insn;
18598 bfd_vma addend_abs = abs (value);
18599
18600 /* Check that the absolute value of the addend is a multiple of
18601 four and, when divided by four, fits in 8 bits. */
18602 if (addend_abs & 0x3)
18603 as_bad_where (fixP->fx_file, fixP->fx_line,
18604 _("bad offset 0x%08lX (must be word-aligned)"),
18605 addend_abs);
18606
18607 if ((addend_abs >> 2) > 0xff)
18608 as_bad_where (fixP->fx_file, fixP->fx_line,
18609 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18610 addend_abs);
18611
18612 /* Extract the instruction. */
18613 insn = md_chars_to_number (buf, INSN_SIZE);
18614
18615 /* If the addend is negative, clear bit 23 of the instruction.
18616 Otherwise set it. */
18617 if (value < 0)
18618 insn &= ~(1 << 23);
18619 else
18620 insn |= 1 << 23;
18621
18622 /* Place the addend (divided by four) into the first eight
18623 bits of the instruction. */
18624 insn &= 0xfffffff0;
18625 insn |= addend_abs >> 2;
18626
18627 /* Update the instruction. */
18628 md_number_to_chars (buf, insn, INSN_SIZE);
18629 }
18630 break;
18631
18632 case BFD_RELOC_UNUSED:
18633 default:
18634 as_bad_where (fixP->fx_file, fixP->fx_line,
18635 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18636 }
18637 }
18638
18639 /* Translate internal representation of relocation info to BFD target
18640 format. */
18641
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For pc-relative fixes the addend depends on whether the target
     uses REL or RELA relocations.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through: non-pc-relative relocs drop down to the shared
	 "code = fixp->fx_r_type" case below.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types are passed to BFD unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* RELA targets can emit this reloc directly; REL targets should
	 have resolved it in md_apply_fix, so reaching here is an error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only relocation types that must never escape the
	   assembler; report which one we saw.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reloc against _GLOBAL_OFFSET_TABLE_ becomes GOTPC.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
18881
18882 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18883
18884 void
18885 cons_fix_new_arm (fragS * frag,
18886 int where,
18887 int size,
18888 expressionS * exp)
18889 {
18890 bfd_reloc_code_real_type type;
18891 int pcrel = 0;
18892
18893 /* Pick a reloc.
18894 FIXME: @@ Should look at CPU word size. */
18895 switch (size)
18896 {
18897 case 1:
18898 type = BFD_RELOC_8;
18899 break;
18900 case 2:
18901 type = BFD_RELOC_16;
18902 break;
18903 case 4:
18904 default:
18905 type = BFD_RELOC_32;
18906 break;
18907 case 8:
18908 type = BFD_RELOC_64;
18909 break;
18910 }
18911
18912 #ifdef TE_PE
18913 if (exp->X_op == O_secrel)
18914 {
18915 exp->X_op = O_symbol;
18916 type = BFD_RELOC_32_SECREL;
18917 }
18918 #endif
18919
18920 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
18921 }
18922
18923 #if defined OBJ_COFF || defined OBJ_ELF
18924 void
18925 arm_validate_fix (fixS * fixP)
18926 {
18927 /* If the destination of the branch is a defined symbol which does not have
18928 the THUMB_FUNC attribute, then we must be calling a function which has
18929 the (interfacearm) attribute. We look for the Thumb entry point to that
18930 function and change the branch to refer to that function instead. */
18931 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
18932 && fixP->fx_addsy != NULL
18933 && S_IS_DEFINED (fixP->fx_addsy)
18934 && ! THUMB_IS_FUNC (fixP->fx_addsy))
18935 {
18936 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
18937 }
18938 }
18939 #endif
18940
18941 int
18942 arm_force_relocation (struct fix * fixp)
18943 {
18944 #if defined (OBJ_COFF) && defined (TE_PE)
18945 if (fixp->fx_r_type == BFD_RELOC_RVA)
18946 return 1;
18947 #endif
18948
18949 /* Resolve these relocations even if the symbol is extern or weak. */
18950 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
18951 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
18952 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
18953 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
18954 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18955 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
18956 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
18957 return 0;
18958
18959 /* Always leave these relocations for the linker. */
18960 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
18961 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
18962 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
18963 return 1;
18964
18965 /* Always generate relocations against function symbols. */
18966 if (fixp->fx_r_type == BFD_RELOC_32
18967 && fixp->fx_addsy
18968 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
18969 return 1;
18970
18971 return generic_force_reloc (fixp);
18972 }
18973
18974 #if defined (OBJ_ELF) || defined (OBJ_COFF)
18975 /* Relocations against function names must be left unadjusted,
18976 so that the linker can use this information to generate interworking
18977 stubs. The MIPS version of this function
18978 also prevents relocations that are mips-16 specific, but I do not
18979 know why it does this.
18980
18981 FIXME:
18982 There is one other problem that ought to be addressed here, but
18983 which currently is not: Taking the address of a label (rather
18984 than a function) and then later jumping to that address. Such
18985 addresses also ought to have their bottom bit set (assuming that
18986 they reside in Thumb code), but at the moment they will not. */
18987
18988 bfd_boolean
18989 arm_fix_adjustable (fixS * fixP)
18990 {
18991 if (fixP->fx_addsy == NULL)
18992 return 1;
18993
18994 /* Preserve relocations against symbols with function type. */
18995 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
18996 return 0;
18997
18998 if (THUMB_IS_FUNC (fixP->fx_addsy)
18999 && fixP->fx_subsy == NULL)
19000 return 0;
19001
19002 /* We need the symbol name for the VTABLE entries. */
19003 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19004 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19005 return 0;
19006
19007 /* Don't allow symbols to be discarded on GOT related relocs. */
19008 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19009 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19010 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19011 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19012 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19013 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19014 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19015 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19016 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19017 return 0;
19018
19019 /* Similarly for group relocations. */
19020 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19021 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19022 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19023 return 0;
19024
19025 return 1;
19026 }
19027 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19028
19029 #ifdef OBJ_ELF
19030
19031 const char *
19032 elf32_arm_target_format (void)
19033 {
19034 #ifdef TE_SYMBIAN
19035 return (target_big_endian
19036 ? "elf32-bigarm-symbian"
19037 : "elf32-littlearm-symbian");
19038 #elif defined (TE_VXWORKS)
19039 return (target_big_endian
19040 ? "elf32-bigarm-vxworks"
19041 : "elf32-littlearm-vxworks");
19042 #else
19043 if (target_big_endian)
19044 return "elf32-bigarm";
19045 else
19046 return "elf32-littlearm";
19047 #endif
19048 }
19049
/* Symbol-frobbing hook for ELF targets; currently just defers to the
   generic ELF implementation.  SYMP is the symbol being processed and
   *PUNTP may be set to discard it.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
19056 #endif
19057
19058 /* MD interface: Finalization. */
19059
19060 /* A good place to do this, although this was probably not intended
19061 for this kind of use. We need to dump the literal pool before
19062 references are made to a null symbol pointer. */
19063
19064 void
19065 arm_cleanup (void)
19066 {
19067 literal_pool * pool;
19068
19069 for (pool = list_of_pools; pool; pool = pool->next)
19070 {
19071 /* Put it at the end of the relevent section. */
19072 subseg_set (pool->section, pool->sub_section);
19073 #ifdef OBJ_ELF
19074 arm_elf_change_section ();
19075 #endif
19076 s_ltorg (0);
19077 }
19078 }
19079
19080 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19081 ARM ones. */
19082
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: re-tag the storage class of every Thumb symbol so the
     linker/debugger can tell Thumb from ARM code.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the native COFF record.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  /* ELF: mark Thumb symbols via st_info, except for the mapping
     symbols ($a/$t/$d etc.) which must keep their generic type.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
19156
19157 /* MD interface: Initialization. */
19158
19159 static void
19160 set_constant_flonums (void)
19161 {
19162 int i;
19163
19164 for (i = 0; i < NUM_FLOAT_VALS; i++)
19165 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19166 abort ();
19167 }
19168
19169 /* Auto-select Thumb mode if it's the only available instruction set for the
19170 given architecture. */
19171
19172 static void
19173 autoselect_thumb_from_cpu_variant (void)
19174 {
19175 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19176 opcode_select (16);
19177 }
19178
19179 void
19180 md_begin (void)
19181 {
19182 unsigned mach;
19183 unsigned int i;
19184
19185 if ( (arm_ops_hsh = hash_new ()) == NULL
19186 || (arm_cond_hsh = hash_new ()) == NULL
19187 || (arm_shift_hsh = hash_new ()) == NULL
19188 || (arm_psr_hsh = hash_new ()) == NULL
19189 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19190 || (arm_reg_hsh = hash_new ()) == NULL
19191 || (arm_reloc_hsh = hash_new ()) == NULL
19192 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19193 as_fatal (_("virtual memory exhausted"));
19194
19195 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19196 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19197 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19198 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19199 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19200 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19201 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19202 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19203 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19204 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19205 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19206 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19207 for (i = 0;
19208 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19209 i++)
19210 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19211 (PTR) (barrier_opt_names + i));
19212 #ifdef OBJ_ELF
19213 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19214 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19215 #endif
19216
19217 set_constant_flonums ();
19218
19219 /* Set the cpu variant based on the command-line options. We prefer
19220 -mcpu= over -march= if both are set (as for GCC); and we prefer
19221 -mfpu= over any other way of setting the floating point unit.
19222 Use of legacy options with new options are faulted. */
19223 if (legacy_cpu)
19224 {
19225 if (mcpu_cpu_opt || march_cpu_opt)
19226 as_bad (_("use of old and new-style options to set CPU type"));
19227
19228 mcpu_cpu_opt = legacy_cpu;
19229 }
19230 else if (!mcpu_cpu_opt)
19231 mcpu_cpu_opt = march_cpu_opt;
19232
19233 if (legacy_fpu)
19234 {
19235 if (mfpu_opt)
19236 as_bad (_("use of old and new-style options to set FPU type"));
19237
19238 mfpu_opt = legacy_fpu;
19239 }
19240 else if (!mfpu_opt)
19241 {
19242 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19243 /* Some environments specify a default FPU. If they don't, infer it
19244 from the processor. */
19245 if (mcpu_fpu_opt)
19246 mfpu_opt = mcpu_fpu_opt;
19247 else
19248 mfpu_opt = march_fpu_opt;
19249 #else
19250 mfpu_opt = &fpu_default;
19251 #endif
19252 }
19253
19254 if (!mfpu_opt)
19255 {
19256 if (!mcpu_cpu_opt)
19257 mfpu_opt = &fpu_default;
19258 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19259 mfpu_opt = &fpu_arch_vfp_v2;
19260 else
19261 mfpu_opt = &fpu_arch_fpa;
19262 }
19263
19264 #ifdef CPU_DEFAULT
19265 if (!mcpu_cpu_opt)
19266 {
19267 mcpu_cpu_opt = &cpu_default;
19268 selected_cpu = cpu_default;
19269 }
19270 #else
19271 if (mcpu_cpu_opt)
19272 selected_cpu = *mcpu_cpu_opt;
19273 else
19274 mcpu_cpu_opt = &arm_arch_any;
19275 #endif
19276
19277 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19278
19279 autoselect_thumb_from_cpu_variant ();
19280
19281 arm_arch_used = thumb_arch_used = arm_arch_none;
19282
19283 #if defined OBJ_COFF || defined OBJ_ELF
19284 {
19285 unsigned int flags = 0;
19286
19287 #if defined OBJ_ELF
19288 flags = meabi_flags;
19289
19290 switch (meabi_flags)
19291 {
19292 case EF_ARM_EABI_UNKNOWN:
19293 #endif
19294 /* Set the flags in the private structure. */
19295 if (uses_apcs_26) flags |= F_APCS26;
19296 if (support_interwork) flags |= F_INTERWORK;
19297 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19298 if (pic_code) flags |= F_PIC;
19299 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19300 flags |= F_SOFT_FLOAT;
19301
19302 switch (mfloat_abi_opt)
19303 {
19304 case ARM_FLOAT_ABI_SOFT:
19305 case ARM_FLOAT_ABI_SOFTFP:
19306 flags |= F_SOFT_FLOAT;
19307 break;
19308
19309 case ARM_FLOAT_ABI_HARD:
19310 if (flags & F_SOFT_FLOAT)
19311 as_bad (_("hard-float conflicts with specified fpu"));
19312 break;
19313 }
19314
19315 /* Using pure-endian doubles (even if soft-float). */
19316 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19317 flags |= F_VFP_FLOAT;
19318
19319 #if defined OBJ_ELF
19320 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19321 flags |= EF_ARM_MAVERICK_FLOAT;
19322 break;
19323
19324 case EF_ARM_EABI_VER4:
19325 case EF_ARM_EABI_VER5:
19326 /* No additional flags to set. */
19327 break;
19328
19329 default:
19330 abort ();
19331 }
19332 #endif
19333 bfd_set_private_flags (stdoutput, flags);
19334
    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
19338 if (atpcs)
19339 {
19340 asection * sec;
19341
19342 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19343
19344 if (sec != NULL)
19345 {
19346 bfd_set_section_flags
19347 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19348 bfd_set_section_size (stdoutput, sec, 0);
19349 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19350 }
19351 }
19352 }
19353 #endif
19354
19355 /* Record the CPU type as well. */
19356 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19357 mach = bfd_mach_arm_iWMMXt2;
19358 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19359 mach = bfd_mach_arm_iWMMXt;
19360 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19361 mach = bfd_mach_arm_XScale;
19362 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19363 mach = bfd_mach_arm_ep9312;
19364 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19365 mach = bfd_mach_arm_5TE;
19366 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19367 {
19368 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19369 mach = bfd_mach_arm_5T;
19370 else
19371 mach = bfd_mach_arm_5;
19372 }
19373 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19374 {
19375 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19376 mach = bfd_mach_arm_4T;
19377 else
19378 mach = bfd_mach_arm_4;
19379 }
19380 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19381 mach = bfd_mach_arm_3M;
19382 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19383 mach = bfd_mach_arm_3;
19384 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19385 mach = bfd_mach_arm_2a;
19386 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19387 mach = bfd_mach_arm_2;
19388 else
19389 mach = bfd_mach_arm_unknown;
19390
19391 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19392 }
19393
19394 /* Command line processing. */
19395
19396 /* md_parse_option
19397 Invocation line includes a switch not recognized by the base assembler.
19398 See if it's a processor-specific option.
19399
19400 This routine is somewhat complicated by the need for backwards
19401 compatibility (since older releases of gcc can't be changed).
19402 The new options try to make the interface as compatible as
19403 possible with GCC.
19404
19405 New options (supported) are:
19406
19407 -mcpu=<cpu name> Assemble for selected processor
19408 -march=<architecture name> Assemble for selected architecture
19409 -mfpu=<fpu architecture> Assemble for selected FPU.
19410 -EB/-mbig-endian Big-endian
19411 -EL/-mlittle-endian Little-endian
19412 -k Generate PIC code
19413 -mthumb Start in Thumb mode
19414 -mthumb-interwork Code supports ARM/Thumb interworking
19415
19416 For now we will also provide support for:
19417
19418 -mapcs-32 32-bit Program counter
19419 -mapcs-26 26-bit Program counter
   -mapcs-float              Floats passed in FP registers
19421 -mapcs-reentrant Reentrant code
19422 -matpcs
19423 (sometime these will probably be replaced with -mapcs=<list of options>
19424 and -matpcs=<list of options>)
19425
   The remaining options are only supported for backwards compatibility.
19427 Cpu variants, the arm part is optional:
19428 -m[arm]1 Currently not supported.
19429 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19430 -m[arm]3 Arm 3 processor
19431 -m[arm]6[xx], Arm 6 processors
19432 -m[arm]7[xx][t][[d]m] Arm 7 processors
19433 -m[arm]8[10] Arm 8 processors
19434 -m[arm]9[20][tdmi] Arm 9 processors
19435 -mstrongarm[110[0]] StrongARM processors
19436 -mxscale XScale processors
19437 -m[arm]v[2345[t[e]]] Arm architectures
19438 -mall All (except the ARM1)
19439 FP variants:
19440 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19441 -mfpe-old (No float load/store multiples)
19442 -mvfpxd VFP Single precision
19443 -mvfp All VFP
19444 -mno-fpu Disable all floating point instructions
19445
19446 The following CPU names are recognized:
19447 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19448 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19449 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
19450 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19451 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19452 arm10t arm10e, arm1020t, arm1020e, arm10200e,
19453 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19454
19455 */
19456
/* Single-character options: -m<...> (machine options, parsed by
   md_parse_option) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* Define OPTION_EB and/or OPTION_EL only for the endiannesses this
   target can actually produce; md_longopts below keys off which of
   the two macros exist.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
19482
/* Description of a simple boolean -m option: seeing OPTION sets
   *VAR to VALUE.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

/* Simple flag options, matched by md_parse_option before the legacy
   and long-option tables.  */
struct arm_option_table arm_opts[] =
{
  {"k", N_("generate PIC code"), &pic_code, 1, NULL},
  {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
19513
/* A deprecated -m option that maps onto the feature-set scheme:
   seeing OPTION stores the address of VALUE into *VAR (legacy_cpu
   or legacy_fpu), and DEPRECATED names the modern replacement.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Legacy cpu/arch/fpu options, kept only for backwards compatibility
   with old gcc drivers.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
19634
/* An entry in the -mcpu= table: the cpu name, its feature set, and the
   FPU assumed when the user gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
  {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
  {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
  {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
  {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
  {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
  {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
  {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
  {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
  {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
  {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
					     | FPU_NEON_EXT_V1),
   NULL},
  {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
  {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
};
19743
/* An entry in the -march= table: the architecture name, its feature
   set, and the FPU assumed when no explicit -mfpu= is given.  */
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA},
  {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
  {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
  {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
  {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
  {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
  {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
  {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
  {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
  {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
  {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
  {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
  {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
  {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
  {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
  {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
  {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
  {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
  {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
  {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
  {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
};
19795
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

/* Extensions accepted after a '+' in -mcpu=/-march= arguments; the
   named feature set is merged into the base cpu/arch feature set by
   arm_parse_extension.  */
static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL, ARM_ARCH_NONE}
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3},
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2},
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL, ARM_ARCH_NONE}
};
19840
/* A simple name -> integer value mapping, used for the float-ABI and
   EABI version option tables below.  */
struct arm_option_value_table
{
  char *name;
  long value;
};

/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard", ARM_FLOAT_ABI_HARD},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"soft", ARM_FLOAT_ABI_SOFT},
  {NULL, 0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu", EF_ARM_EABI_UNKNOWN},
  {"4", EF_ARM_EABI_VER4},
  {"5", EF_ARM_EABI_VER5},
  {NULL, 0}
};
#endif
19865
/* An option of the form -m<option><argument>: OPTION is matched as a
   prefix and FUNC is called on the remainder of the argument.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
19873
19874 static int
19875 arm_parse_extension (char * str, const arm_feature_set **opt_p)
19876 {
19877 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
19878
19879 /* Copy the feature set, so that we can modify it. */
19880 *ext_set = **opt_p;
19881 *opt_p = ext_set;
19882
19883 while (str != NULL && *str != 0)
19884 {
19885 const struct arm_option_cpu_value_table * opt;
19886 char * ext;
19887 int optlen;
19888
19889 if (*str != '+')
19890 {
19891 as_bad (_("invalid architectural extension"));
19892 return 0;
19893 }
19894
19895 str++;
19896 ext = strchr (str, '+');
19897
19898 if (ext != NULL)
19899 optlen = ext - str;
19900 else
19901 optlen = strlen (str);
19902
19903 if (optlen == 0)
19904 {
19905 as_bad (_("missing architectural extension"));
19906 return 0;
19907 }
19908
19909 for (opt = arm_extensions; opt->name != NULL; opt++)
19910 if (strncmp (opt->name, str, optlen) == 0)
19911 {
19912 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
19913 break;
19914 }
19915
19916 if (opt->name == NULL)
19917 {
19918 as_bad (_("unknown architectural extnsion `%s'"), str);
19919 return 0;
19920 }
19921
19922 str = ext;
19923 };
19924
19925 return 1;
19926 }
19927
19928 static int
19929 arm_parse_cpu (char * str)
19930 {
19931 const struct arm_cpu_option_table * opt;
19932 char * ext = strchr (str, '+');
19933 int optlen;
19934
19935 if (ext != NULL)
19936 optlen = ext - str;
19937 else
19938 optlen = strlen (str);
19939
19940 if (optlen == 0)
19941 {
19942 as_bad (_("missing cpu name `%s'"), str);
19943 return 0;
19944 }
19945
19946 for (opt = arm_cpus; opt->name != NULL; opt++)
19947 if (strncmp (opt->name, str, optlen) == 0)
19948 {
19949 mcpu_cpu_opt = &opt->value;
19950 mcpu_fpu_opt = &opt->default_fpu;
19951 if (opt->canonical_name)
19952 strcpy(selected_cpu_name, opt->canonical_name);
19953 else
19954 {
19955 int i;
19956 for (i = 0; i < optlen; i++)
19957 selected_cpu_name[i] = TOUPPER (opt->name[i]);
19958 selected_cpu_name[i] = 0;
19959 }
19960
19961 if (ext != NULL)
19962 return arm_parse_extension (ext, &mcpu_cpu_opt);
19963
19964 return 1;
19965 }
19966
19967 as_bad (_("unknown cpu `%s'"), str);
19968 return 0;
19969 }
19970
19971 static int
19972 arm_parse_arch (char * str)
19973 {
19974 const struct arm_arch_option_table *opt;
19975 char *ext = strchr (str, '+');
19976 int optlen;
19977
19978 if (ext != NULL)
19979 optlen = ext - str;
19980 else
19981 optlen = strlen (str);
19982
19983 if (optlen == 0)
19984 {
19985 as_bad (_("missing architecture name `%s'"), str);
19986 return 0;
19987 }
19988
19989 for (opt = arm_archs; opt->name != NULL; opt++)
19990 if (streq (opt->name, str))
19991 {
19992 march_cpu_opt = &opt->value;
19993 march_fpu_opt = &opt->default_fpu;
19994 strcpy(selected_cpu_name, opt->name);
19995
19996 if (ext != NULL)
19997 return arm_parse_extension (ext, &march_cpu_opt);
19998
19999 return 1;
20000 }
20001
20002 as_bad (_("unknown architecture `%s'\n"), str);
20003 return 0;
20004 }
20005
20006 static int
20007 arm_parse_fpu (char * str)
20008 {
20009 const struct arm_option_cpu_value_table * opt;
20010
20011 for (opt = arm_fpus; opt->name != NULL; opt++)
20012 if (streq (opt->name, str))
20013 {
20014 mfpu_opt = &opt->value;
20015 return 1;
20016 }
20017
20018 as_bad (_("unknown floating point format `%s'\n"), str);
20019 return 0;
20020 }
20021
20022 static int
20023 arm_parse_float_abi (char * str)
20024 {
20025 const struct arm_option_value_table * opt;
20026
20027 for (opt = arm_float_abis; opt->name != NULL; opt++)
20028 if (streq (opt->name, str))
20029 {
20030 mfloat_abi_opt = opt->value;
20031 return 1;
20032 }
20033
20034 as_bad (_("unknown floating point abi `%s'\n"), str);
20035 return 0;
20036 }
20037
20038 #ifdef OBJ_ELF
20039 static int
20040 arm_parse_eabi (char * str)
20041 {
20042 const struct arm_option_value_table *opt;
20043
20044 for (opt = arm_eabis; opt->name != NULL; opt++)
20045 if (streq (opt->name, str))
20046 {
20047 meabi_flags = opt->value;
20048 return 1;
20049 }
20050 as_bad (_("unknown EABI `%s'\n"), str);
20051 return 0;
20052 }
20053 #endif
20054
/* Options taking a sub-argument; each entry's func parses the text
   following the matched prefix.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
20071
/* Decode machine-specific option C (with argument ARG, which may be
   NULL).  Endianness switches are handled directly; everything else
   is looked up, in order, in the simple-flag table (arm_opts), the
   legacy table (arm_legacy_opts) and the sub-argument table
   (arm_long_opts).  Returns 1 if the option was recognized, 0
   otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple flags: the whole option text must match.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy cpu/fpu selection options; these store a pointer to
	 the table entry's feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser on the text after the
		 matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
20164
20165 void
20166 md_show_usage (FILE * fp)
20167 {
20168 struct arm_option_table *opt;
20169 struct arm_long_option_table *lopt;
20170
20171 fprintf (fp, _(" ARM-specific assembler options:\n"));
20172
20173 for (opt = arm_opts; opt->option != NULL; opt++)
20174 if (opt->help != NULL)
20175 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20176
20177 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20178 if (lopt->help != NULL)
20179 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20180
20181 #ifdef OPTION_EB
20182 fprintf (fp, _("\
20183 -EB assemble code for a big-endian cpu\n"));
20184 #endif
20185
20186 #ifdef OPTION_EL
20187 fprintf (fp, _("\
20188 -EL assemble code for a little-endian cpu\n"));
20189 #endif
20190 }
20191
20192
20193 #ifdef OBJ_ELF
typedef struct
{
  /* EABI Tag_CPU_arch attribute value reported for this architecture.  */
  int val;
  /* Feature bits that must all be present for this entry to match.  */
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  The scan in aeabi_set_public_attributes keeps the
   last entry whose features are all present, so the three v7 profiles
   deliberately share the value 10; the {0, ARM_ARCH_NONE} sentinel
   terminates the walk (val == 0).  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {8, ARM_ARCH_V6K},
    {9, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
20218
/* Set the public EABI object attributes (the numeric tags below are the
   Tag_* values from the ARM EABI build-attributes addendum).  Called
   from arm_md_end just before the .ARM.attributes section is built.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Find the highest architecture covering FLAGS: the table is sorted
     least features first, so the last entry whose features are all
     present wins.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      /* For generic "armvN" names, skip the "armv" prefix and report
	 the remainder in upper case.  NOTE(review): this upcases
	 selected_cpu_name in place.  */
      if (strncmp(p, "armv", 4) == 0)
	{
	  int i;
	  
	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
    }
  /* Tag_CPU_arch.  */
  elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
  /* Tag_THUMB_ISA_use: 2 when Thumb-2 instructions were used, else 1.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 9,
	ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch: based on the instructions actually used, not on the
     merged FLAGS, so an unused -mfpu setting is not reported.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
}
20304
20305 /* Add the .ARM.attributes section. */
20306 void
20307 arm_md_end (void)
20308 {
20309 segT s;
20310 char *p;
20311 addressT addr;
20312 offsetT size;
20313
20314 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20315 return;
20316
20317 aeabi_set_public_attributes ();
20318 size = elf32_arm_eabi_attr_size (stdoutput);
20319 s = subseg_new (".ARM.attributes", 0);
20320 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20321 addr = frag_now_fix ();
20322 p = frag_more (size);
20323 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20324 }
20325 #endif /* OBJ_ELF */
20326
20327
20328 /* Parse a .cpu directive. */
20329
20330 static void
20331 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20332 {
20333 const struct arm_cpu_option_table *opt;
20334 char *name;
20335 char saved_char;
20336
20337 name = input_line_pointer;
20338 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20339 input_line_pointer++;
20340 saved_char = *input_line_pointer;
20341 *input_line_pointer = 0;
20342
20343 /* Skip the first "all" entry. */
20344 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20345 if (streq (opt->name, name))
20346 {
20347 mcpu_cpu_opt = &opt->value;
20348 selected_cpu = opt->value;
20349 if (opt->canonical_name)
20350 strcpy(selected_cpu_name, opt->canonical_name);
20351 else
20352 {
20353 int i;
20354 for (i = 0; opt->name[i]; i++)
20355 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20356 selected_cpu_name[i] = 0;
20357 }
20358 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20359 *input_line_pointer = saved_char;
20360 demand_empty_rest_of_line ();
20361 return;
20362 }
20363 as_bad (_("unknown cpu `%s'"), name);
20364 *input_line_pointer = saved_char;
20365 ignore_rest_of_line ();
20366 }
20367
20368
20369 /* Parse a .arch directive. */
20370
20371 static void
20372 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20373 {
20374 const struct arm_arch_option_table *opt;
20375 char saved_char;
20376 char *name;
20377
20378 name = input_line_pointer;
20379 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20380 input_line_pointer++;
20381 saved_char = *input_line_pointer;
20382 *input_line_pointer = 0;
20383
20384 /* Skip the first "all" entry. */
20385 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20386 if (streq (opt->name, name))
20387 {
20388 mcpu_cpu_opt = &opt->value;
20389 selected_cpu = opt->value;
20390 strcpy(selected_cpu_name, opt->name);
20391 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20392 *input_line_pointer = saved_char;
20393 demand_empty_rest_of_line ();
20394 return;
20395 }
20396
20397 as_bad (_("unknown architecture `%s'\n"), name);
20398 *input_line_pointer = saved_char;
20399 ignore_rest_of_line ();
20400 }
20401
20402
20403 /* Parse a .object_arch directive. */
20404
20405 static void
20406 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20407 {
20408 const struct arm_arch_option_table *opt;
20409 char saved_char;
20410 char *name;
20411
20412 name = input_line_pointer;
20413 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20414 input_line_pointer++;
20415 saved_char = *input_line_pointer;
20416 *input_line_pointer = 0;
20417
20418 /* Skip the first "all" entry. */
20419 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20420 if (streq (opt->name, name))
20421 {
20422 object_arch = &opt->value;
20423 *input_line_pointer = saved_char;
20424 demand_empty_rest_of_line ();
20425 return;
20426 }
20427
20428 as_bad (_("unknown architecture `%s'\n"), name);
20429 *input_line_pointer = saved_char;
20430 ignore_rest_of_line ();
20431 }
20432
20433
20434 /* Parse a .fpu directive. */
20435
20436 static void
20437 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20438 {
20439 const struct arm_option_cpu_value_table *opt;
20440 char saved_char;
20441 char *name;
20442
20443 name = input_line_pointer;
20444 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20445 input_line_pointer++;
20446 saved_char = *input_line_pointer;
20447 *input_line_pointer = 0;
20448
20449 for (opt = arm_fpus; opt->name != NULL; opt++)
20450 if (streq (opt->name, name))
20451 {
20452 mfpu_opt = &opt->value;
20453 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20454 *input_line_pointer = saved_char;
20455 demand_empty_rest_of_line ();
20456 return;
20457 }
20458
20459 as_bad (_("unknown floating point format `%s'\n"), name);
20460 *input_line_pointer = saved_char;
20461 ignore_rest_of_line ();
20462 }
20463